from aiohttp import payload
from influxdb_client import InfluxDBClient
import pytz
import json
import os
import time
import requests
from datetime import datetime, timedelta, timezone
import uuid
from dataclasses import dataclass, asdict
from typing import Dict, List, Any, Optional
from APP.models.alert_list import alert_list
from APP.utils.Match import match_type_id, match_field
import pymysql
from pymysql.cursors import DictCursor
from APP.database import db
import redis
# MySQL configuration note: adjust to the actual deployment.
# The SQLAlchemy instance is declared (correctly) in APP.database.


# Configuration parameters
# NOTE(review): InfluxDB credentials are hard-coded here — consider loading
# them from environment variables or a secrets store.
url = "http://192.168.31.169:8086"
token = "RMKNHJIJnESmEgsj4mEP9vvQOJkx1qhE2bAHHH0EzAe01i_D8PYemJbO0BWbvjGPC-BSpunSoGk08XWTEWh51g=="
org = "dccec83301863fb4"
bucket = "kylin"
# Redis connection settings (added alongside the other configuration parameters)
REDIS_CONFIG = {
    'redis_host': 'localhost',
    'redis_port': 6379,
    'redis_db': 0,
    'redis_password': None
}

# Initialize the shared Redis client; on any failure fall back to None so the
# rest of the module degrades gracefully (Redis writes are simply skipped).
try:
    redis_client = redis.Redis(
        host=REDIS_CONFIG['redis_host'],
        port=REDIS_CONFIG['redis_port'],
        db=REDIS_CONFIG['redis_db'],
        password=REDIS_CONFIG['redis_password'],
        decode_responses=False  # keep values as raw bytes
    )
    # Probe the connection
    if not redis_client.ping():
        raise ConnectionError("Redis连接失败")
except Exception as e:
    print(f"Redis初始化失败: {str(e)}")
    redis_client = None
# Cache of the previous cumulative values; keys include the device name so
# devices stay distinct.
last_diskio_iotime = {}

# Active anomaly state, keyed by anomaly id (see create_anomaly_id)
active_anomalies: Dict[str, Dict] = {}

# Scoring-system configuration (consumed by AnomalyEvaluator)
COMPLEXITY_CRITERIA = {
    "severity_scores": {"warning": 1, "alert": 2, "critical": 3},
    # Duration buckets (seconds)
    "duration_thresholds": {
        "transient": 10,  # 10 seconds
        "persistent": 20,  # 20 seconds
        "long_term": 30,  # 30 seconds
        "chronic": 60  # 1 minute
    },
    # Occurrence-frequency buckets
    "frequency_thresholds": {
        "occasional": 5,  # 1-5 times
        "frequent": 10,  # 6-10 times
        "high": 20,  # 11-20 times
        "critical": 21  # >20 times
    }
}

# Directory one level above the directory containing this script
current_dir = os.path.dirname( os.path.dirname(os.path.abspath(__file__)))
# Locate the rule file via a relative path
rules_path = os.path.join(current_dir, 'exception_rules.json')  # moved to the config directory


# Load the anomaly-detection rules at import time.
# NOTE(review): this raises at import if the file is missing — confirm that
# failing fast here is intended.
with open(rules_path, 'r', encoding='utf-8') as f:
    rules = json.load(f)


# Highlight marker (13 exclamation marks); not referenced in this chunk
RED_EXCLAMATION = "!" * 13

# ---------------------------
# Core data structure (data carrier)
# ---------------------------
@dataclass
class AnomalyData:
    """Uniform container for anomaly feature data, including:
    - basic info: id / type / device / occurrence time
    - metric data: current value / threshold / duration
    - impact assessment: business impact / related services
    - analysis result: complexity score / handling suggestions
    """
    id: str                        # unique anomaly id (see create_anomaly_id)
    type: str                      # anomaly type; callers here set it to the measurement name
    measurement: str               # InfluxDB measurement the anomaly came from
    field: str                     # metric field within the measurement
    severity: str                  # "warning" / "alert" / "critical"
    metrics: Dict[str, float]      # raw field -> value snapshot
    start_time: datetime           # when the anomaly was first observed
    duration: int                  # seconds since start_time
    error_frequency: int           # number of abnormal samples observed
    business_impact: str           # "minor" / "moderate" / "major" (see AnomalyEvaluator)
    affected_services: List[str]   # services presumed impacted
    device: str                    # originating device name
    tags: Dict[str, Any]           # business tags extracted from the record
    threshold: float               # threshold that was violated
    current_value: float           # latest observed value
    complexity_score: int          # five-dimension score (0 until computed)
    logs: List[str]                # de-duplicated log excerpts

# ---------------------------
# Anomaly evaluation engine (core algorithms)
# ---------------------------
class AnomalyEvaluator:
    """Anomaly evaluator: scores anomalies and derives severity / impact."""

    @staticmethod
    def calculate_complexity_score(anomaly_data: "AnomalyData") -> int:
        """Five-dimension scoring model combining severity, duration,
        error frequency, business impact and affected-service count.

        The annotation is a string literal so this class can be defined even
        if AnomalyData is declared later or in another module.
        """
        complexity_score = 0

        # 1. Severity score (unknown severities default to 1)
        severity_scores = COMPLEXITY_CRITERIA["severity_scores"]
        complexity_score += severity_scores.get(anomaly_data.severity, 1)

        # 2. Duration score
        duration = anomaly_data.duration
        duration_thresholds = COMPLEXITY_CRITERIA["duration_thresholds"]
        if duration > duration_thresholds["chronic"]:
            complexity_score += 4
        elif duration > duration_thresholds["long_term"]:
            complexity_score += 3
        elif duration > duration_thresholds["persistent"]:
            complexity_score += 2
        elif duration > duration_thresholds["transient"]:
            complexity_score += 1

        # 3. Error-frequency score
        error_freq = anomaly_data.error_frequency
        frequency_thresholds = COMPLEXITY_CRITERIA["frequency_thresholds"]
        if error_freq > frequency_thresholds["critical"]:
            complexity_score += 4
        elif error_freq > frequency_thresholds["high"]:
            complexity_score += 3
        elif error_freq > frequency_thresholds["frequent"]:
            complexity_score += 2
        elif error_freq > frequency_thresholds["occasional"]:
            complexity_score += 1

        # 4. Business-impact score (unknown impacts default to 1)
        impact_scores = {"minor": 1, "moderate": 2, "major": 3, "critical": 4}
        complexity_score += impact_scores.get(anomaly_data.business_impact, 1)

        # 5. Affected-service-count score
        service_count = len(anomaly_data.affected_services)
        if service_count > 3:
            complexity_score += 3
        elif service_count > 1:
            complexity_score += 2
        else:
            complexity_score += 1

        return complexity_score

    @staticmethod
    def determine_severity(complexity_score: int) -> str:
        """Map a complexity score to a severity level."""
        if complexity_score <= 5:
            return "warning"  # early warning, not fed into automated ops
        elif complexity_score <= 10:
            return "alert"  # alert, fed into automated ops
        else:
            return "critical"  # severe, fed into automated ops

    @staticmethod
    def determine_business_impact(measurement: str, severity: str) -> str:
        """Map a severity level to a business-impact level.

        *measurement* is kept for interface compatibility but currently does
        not influence the result (the previous implementation built an unused
        ``critical_measurements`` list from it; that dead code was removed).
        Unknown severities fall back to "minor".
        """
        if severity == "warning":
            return "minor"  # warning = minor impact
        elif severity == "alert":
            return "moderate"  # alert = moderate impact
        elif severity == "critical":
            return "major"  # critical = major impact
        else:
            return "minor"

    # Business-impact assessment: when an anomaly is detected, quickly
    # identify which business services may be affected.
    @staticmethod
    def get_affected_services(measurement: str, device: str) -> List[str]:
        """Return the services presumed affected by *measurement*.

        BUGFIX: the mapping now also covers the measurement names actually
        used elsewhere in this module ("mem", "net", "system_metrics"), which
        previously never matched and always fell through to ["general"]. The
        original keys are kept for backward compatibility. *device* is
        accepted for interface compatibility but unused.
        """
        service_mapping = {
            "cpu": ["web", "api", "database"],
            "memory": ["web", "api", "cache"],
            "mem": ["web", "api", "cache"],
            "disk": ["database", "storage"],
            "diskio": ["database", "storage"],
            "network": ["web", "api", "database"],
            "net": ["web", "api", "database"],
            "system": ["general"],
            "system_metrics": ["general"]
        }
        return service_mapping.get(measurement, ["general"])


def is_abnormal(value, threshold, condition):
    """Return True when *value* violates *threshold* under *condition*.

    Supported conditions: "gt" (abnormal when value > threshold) and
    "lt" (abnormal when value < threshold). Any other condition is
    treated as "not abnormal".
    """
    checks = {
        "gt": lambda v, t: v > t,
        "lt": lambda v, t: v < t,
    }
    check = checks.get(condition)
    return check(value, threshold) if check is not None else False


def is_abnormal_multi(value, rules_list):
    """Return True if *value* trips at least one rule in *rules_list*.

    Each rule is a dict that may carry "threshold" and "condition" keys;
    missing keys are passed through as None and treated as non-matching.
    """
    return any(
        is_abnormal(value, rule.get("threshold"), rule.get("condition"))
        for rule in rules_list
    )


def query_new_data(client, bucket, org, start_time):
    """Run an InfluxDB Flux query and return the raw records since *start_time*.

    Builds a Flux pipeline over *bucket*, sorted by `_time`, then flattens
    every table of the result into a single list of records.
    """
    flux = f'''
    from(bucket:"{bucket}")
      |> range(start: {start_time})
      |> sort(columns: ["_time"])
    '''
    tables = client.query_api().query(query=flux)
    return [record for table in tables for record in table.records]


def extract_tags(record):
    """Data cleaning: return only the business tags from *record*.

    Drops InfluxDB's internal bookkeeping fields and keeps everything else
    from ``record.values``.
    """
    internal_fields = ('_time', '_measurement', '_field', '_value')
    return {
        key: val
        for key, val in record.values.items()
        if key not in internal_fields
    }


def create_anomaly_id(measurement: str, field: str, device: str) -> str:
    """Build an anomaly id from the metric identity plus epoch seconds.

    Note: second-resolution timestamps mean two anomalies for the same
    measurement/field/device within the same second share an id.
    """
    epoch_seconds = int(time.time())
    return "_".join([measurement, field, device, str(epoch_seconds)])


def update_anomaly_state(anomaly_id: str, measurement: str, field: str, value: float,
                         threshold: float, device: str, tags: Dict[str, Any], log_message: str):
    """Create or refresh the in-memory record for an active anomaly.

    A first sighting registers a full state dict under *anomaly_id* in the
    module-level ``active_anomalies``; repeat sightings update the current
    value, the per-field metric, the error count, the last-update timestamp
    and the log trail in place.

    BUGFIX: the update branch previously re-fetched the anomaly dict and
    assigned ``current_value`` twice; the redundant statements were removed
    (behavior unchanged).
    """
    current_time = datetime.now(timezone.utc)

    anomaly = active_anomalies.get(anomaly_id)
    if anomaly is None:
        # New anomaly: register a fresh record.
        active_anomalies[anomaly_id] = {
            "measurement": measurement,
            "field": field,
            "start_time": current_time,
            "threshold": threshold,
            "current_value": value,
            "metrics": {field: value},
            "error_count": 1,
            "device": device,
            "tags": tags,
            "last_update": current_time,
            "logs": [log_message]  # log trail
        }
    else:
        # Existing anomaly: refresh its state.
        anomaly["current_value"] = value
        anomaly["metrics"][field] = value  # store the raw metric value
        anomaly["error_count"] += 1
        anomaly["last_update"] = current_time
        anomaly["logs"].append(log_message)  # append to the log trail




def should_send_to_llm(anomaly_id: str) -> bool:
    """Decide whether the active anomaly *anomaly_id* warrants LLM analysis.

    An anomaly is sent when ANY of the following holds:
      1. it has lasted more than 30 seconds,
      2. it has occurred more than 5 times,
      3. it concerns a key metric (cpu / mem / disk).
    Unknown ids are never sent.
    """
    anomaly = active_anomalies.get(anomaly_id)
    if anomaly is None:
        return False

    elapsed = (anomaly["last_update"] - anomaly["start_time"]).total_seconds()
    hit_count = anomaly["error_count"]
    on_key_metric = anomaly["measurement"] in ("cpu", "mem", "disk")

    return elapsed > 30 or hit_count > 5 or on_key_metric

# datetime handling for payload construction (json.dumps `default=` hook)
def datetime_serializer(obj):
    """Serialize datetime objects to ISO-8601 strings; reject anything else."""
    if not isinstance(obj, datetime):
        raise TypeError(f"Type {type(obj)} not serializable")
    return obj.isoformat()
# New: batch-send accumulated anomalies to the LLM pipeline (via Redis).
def send_batch_anomalies_to_llm():
    """Batch-store qualifying active anomalies into Redis and queue alerts.

    For every active anomaly that passes ``should_send_to_llm`` this:
      1. builds an ``AnomalyData`` snapshot with de-duplicated logs,
      2. computes its complexity score and derives severity / business impact,
      3. stores one JSON payload per anomaly in Redis (24h TTL),
      4. queues a matching ``alert_list`` row, de-duplicated by
         (measurement, field, time).
    Finally, resolved anomalies are cleaned up.

    BUGFIX: the anomaly was previously appended to ``anomalies_to_send``
    twice (before AND after scoring — the same object both times), so every
    anomaly was stored to Redis twice and double-counted. It is now
    appended exactly once, after scoring.
    """
    if not active_anomalies:
        return

    # Collect every anomaly that qualifies for LLM analysis.
    anomalies_to_send = []
    for anomaly_id, anomaly in active_anomalies.items():
        if not should_send_to_llm(anomaly_id):
            continue

        current_time = datetime.now(timezone.utc)
        duration = int((current_time - anomaly["start_time"]).total_seconds())

        # Log de-duplication: walk newest-first so the latest entries win,
        # keep at most 25 unique logs (identified by their first 100
        # characters), then restore chronological order.
        unique_logs = []
        seen = set()
        for log in reversed(anomaly["logs"]):
            signature = log[:100]
            if signature not in seen:
                seen.add(signature)
                unique_logs.append(log)
            if len(unique_logs) >= 25:
                break
        unique_logs.reverse()

        # Build the anomaly snapshot (severity/impact are provisional and
        # recomputed right below).
        anomaly_data = AnomalyData(
            id=anomaly_id,
            type=anomaly["measurement"],
            measurement=anomaly["measurement"],
            field=anomaly["field"],
            severity="warning",
            metrics=anomaly["metrics"],
            start_time=anomaly["start_time"],
            duration=duration,
            error_frequency=anomaly["error_count"],
            business_impact="minor",
            affected_services=AnomalyEvaluator.get_affected_services(
                anomaly["measurement"], anomaly["device"]
            ),
            device=anomaly["device"],
            tags=anomaly["tags"],
            threshold=anomaly["threshold"],
            current_value=anomaly["current_value"],
            complexity_score=0,
            logs=unique_logs
        )
        # Trim the stored logs (keep the last 5 for later analysis).
        active_anomalies[anomaly_id]["logs"] = anomaly["logs"][-5:]

        # Score the anomaly and derive severity / business impact.
        complexity_score = AnomalyEvaluator.calculate_complexity_score(anomaly_data)
        anomaly_data.complexity_score = complexity_score
        severity = AnomalyEvaluator.determine_severity(complexity_score)
        anomaly_data.severity = severity
        anomaly_data.business_impact = AnomalyEvaluator.determine_business_impact(
            anomaly["measurement"], severity
        )

        anomalies_to_send.append(anomaly_data)

    if anomalies_to_send and redis_client:
        try:
            # Keep all three severity levels (warning / alert / critical).
            filtered_anomalies = [
                anomaly for anomaly in anomalies_to_send
                if anomaly.severity in ['warning', 'alert', 'critical']
            ]

            # If nothing reaches alert/critical, report and bail out.
            # NOTE(review): this `return` also skips cleanup_resolved_anomalies()
            # at the bottom of this function — confirm that is intended.
            if not any(a.severity in ['alert', 'critical'] for a in filtered_anomalies) and len(anomalies_to_send) > 0:
                print("!!! 仅有预警级别数据（不包含警报/严重）！！！")
                print(f"当前预警数据量: {len(anomalies_to_send)}条")
                return

            # Store each anomaly under its own request id.
            stored_count = 0
            for anomaly in filtered_anomalies:
                # Request id from Beijing time (UTC+8), e.g. req_20250725135145.
                # NOTE(review): second resolution — anomalies stored within the
                # same second share a key and overwrite each other.
                beijing_time = datetime.now(timezone(timedelta(hours=8)))
                formatted_time = beijing_time.strftime("%Y%m%d%H%M%S")
                anomaly_request_id = f"req_{formatted_time}"

                # Build the per-anomaly payload.
                payload = {
                    "anomalies": [
                        {
                            "id": anomaly.id,
                            "type": anomaly.type,
                            "measurement": anomaly.measurement,
                            "field": anomaly.field,
                            "severity": anomaly.severity,
                            "metrics": anomaly.metrics,
                            "start_time": anomaly.start_time.isoformat(),
                            "duration": anomaly.duration,
                            "error_frequency": anomaly.error_frequency,
                            "business_impact": anomaly.business_impact,
                            "affected_services": anomaly.affected_services,
                            "device": anomaly.device,
                            "tags": {k: v.isoformat() if isinstance(v, datetime) else v
                                     for k, v in anomaly.tags.items()},
                            "threshold": anomaly.threshold,
                            "current_value": anomaly.current_value,
                            "complexity_score": anomaly.complexity_score,
                            "logs": anomaly.logs[-10:]  # keep at most 10 logs
                        }
                    ],
                    "analysis_request": {
                        "request_id": anomaly_request_id,
                        "timestamp": datetime.now(timezone.utc).isoformat(),
                        "priority": "high" if anomaly.severity == 'critical' else "normal"
                    }
                }

                # Store in Redis with a 24-hour expiry.
                redis_key = f"anomaly_data:{anomaly_request_id}"
                redis_client.setex(redis_key, 86400, json.dumps(payload))
                stored_count += 1
            print(f"成功存储{stored_count}条异常到Redis")

            # Queue matching alert_list rows (original DB logic preserved).
            alerts_to_add = []
            processed_combinations = set()  # tracks handled (measurement, field, time) combos

            for anomaly in filtered_anomalies:
                is_alert = 1 if anomaly.severity == 'critical' else 0
                measurement = anomaly.measurement
                field = anomaly.field
                type_id = match_type_id(measurement, field)
                field_info = match_field(type_id)

                # Alert time as naive Beijing local time.
                alert_time = anomaly.start_time.astimezone(pytz.timezone('Asia/Shanghai')).replace(tzinfo=None)

                # Skip duplicates of the same (measurement, field, time) combination.
                combination_key = (measurement, field, alert_time)
                if combination_key in processed_combinations:
                    continue
                processed_combinations.add(combination_key)

                alerts_to_add.append(alert_list(
                    type_id=type_id,
                    status=0,
                    severity=anomaly.severity,
                    measurement=measurement,
                    title=field_info[2],
                    content=field_info[3],
                    field=field,
                    time=alert_time,
                    value=anomaly.current_value,
                    is_alert=is_alert,
                    danger_data=json.dumps([{
                        "name": f"{measurement}.{field}",
                        "begin": alert_time.strftime("%H:%M:%S"),
                        "end": None
                    }]),
                    work_note=None
                ))

            # Bulk-insert the queued alert_list rows.
            # NOTE(review): there is no db.session.commit() here — confirm the
            # caller commits, otherwise these rows are never persisted.
            if alerts_to_add:
                db.session.bulk_save_objects(alerts_to_add)

        except Exception as e:
            print(f"Redis存储失败: {str(e)}")
            db.session.rollback()

    # Clean up resolved anomalies (original behavior preserved).
    cleanup_resolved_anomalies()


def cleanup_resolved_anomalies():
    """Drop active anomalies that have gone stale.

    An anomaly is considered resolved once more than 60 seconds have passed
    without an update; such entries are removed from ``active_anomalies``.
    """
    now = datetime.now(timezone.utc)

    # Collect stale ids first, then delete — never mutate while iterating.
    stale_ids = [
        anomaly_id
        for anomaly_id, anomaly in active_anomalies.items()
        if (now - anomaly["last_update"]).total_seconds() > 60
    ]

    for anomaly_id in stale_ids:
        del active_anomalies[anomaly_id]

    if stale_ids:
        print(f"清理了 {len(stale_ids)} 个已解决的异常")
