from datetime import datetime, timedelta
from APP.models.alert_list import alert_list
from APP.database import db
import json

from APP.pojo import redangerList
from APP.pojo.listCount import listCount
from APP.pojo.listData import listData
from APP.pojo.redangerList import redangerList
from flask import current_app, jsonify, Blueprint
from APP import socketio  # 修改导入路径
from APP.utils.Match import match_field

# Blueprint grouping the danger-board endpoints; registered by the app factory.
dangerouslist = Blueprint('dangerouslist', __name__)


# URL path for the danger-board data feed (consumed where routes are registered).
path = '/dangerBoard/getDangerousList'
# Helper that formats alert records into the dashboard response payload
def format_alert_data(records, time_list, total_count, important_count, measurement_data):
    """Format database alert records into the dashboard JSON payload.

    Args:
        records: non-empty list of alert ORM rows (id, severity, title,
            content, time, type_id, value attributes are read).
        time_list: list of "YYYY-MM-DD" strings for the chart x-axis.
        total_count: per-day total alert counts aligned with ``time_list``.
        important_count: per-day important alert counts aligned with ``time_list``.
        measurement_data: list of {"name", "value"} dicts for the pie chart.

    Returns:
        dict with ``message``/``code``/``data`` keys; ``code`` 500 with empty
        ``data`` when ``records`` is not a non-empty list.
    """
    if not isinstance(records, list) or len(records) == 0:
        current_app.logger.error("无效的输入数据")
        return {
            "message": "Data format error",
            "code": 500,
            "data": {}
        }

    # Counters: count0 = unhandled warnings, count1 unused (always 0),
    # count2 = total number of warnings.
    counter = listCount(0, 0, 0)

    list_data = []
    for record in records:
        # "alert"/"critical" severities are treated as unhandled warnings.
        if record.severity in ("alert", "critical"):
            counter.count0 += 1
        counter.count2 += 1  # total warnings

        # Wrap each row in a listData value object for serialization.
        list_data.append(
            listData(
                id=record.id,
                status=record.severity,
                title=record.title,
                content=record.content,
                time=record.time.strftime("%Y-%m-%d %H:%M:%S")
            ).to_dict()
        )

    redangerLists = []
    current_app.logger.debug(f"redangerList类型: {type(redangerList)}")

    # Resolve the exception-rules file once and load it a single time,
    # instead of re-opening and re-parsing the JSON for every record.
    import os
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.abspath(os.path.join(current_dir, '../../'))
    json_path = current_app.config.get(
        'EXCEPTION_RULES_PATH',
        os.path.join(project_root, 'InfluxDB_setting/exception_rules.json')
    )
    try:
        with open(json_path, 'r') as file:
            rules = json.load(file)
    except Exception as e:
        current_app.logger.error(f"处理redangerList记录出错: {str(e)}")
        rules = None

    if rules is not None:
        for record in records:
            try:
                # match_field maps a type_id to (measurement, field_name);
                # call it once and destructure instead of calling twice.
                matched = match_field(record.type_id)
                measurement, field_name = matched[0], matched[1]

                field_rule = rules.get(measurement, {}).get(field_name, {})

                # Rules may be a list (e.g. nested diskio.io_time rules) or a
                # plain dict; anything else is rejected defensively.
                if isinstance(field_rule, list):
                    threshold = field_rule[0].get("threshold", 0)  # first rule's threshold
                elif isinstance(field_rule, dict):
                    threshold = field_rule.get("threshold", 0)
                else:
                    current_app.logger.error(f"无效规则格式: {type(field_rule)}")
                    continue

                # Skip records that would make the deviation meaningless.
                if record.value == 0 or threshold == 0:
                    current_app.logger.error(
                        f"无效计算参数: measurement={measurement}, field={field_name}, value={record.value}, threshold={threshold}")
                    continue

                # Deviation from threshold as a percentage, clamped to
                # [-1000, 1000] to keep the UI readable.
                if threshold > 0:
                    deviation = threshold - record.value
                    percentage = (deviation / threshold) * 100
                    clamped_percentage = max(min(percentage, 1000), -1000)
                else:
                    # Negative threshold: log and fall back to 0%.
                    current_app.logger.warning(f"无效计算参数: threshold={threshold}, value={record.value}")
                    clamped_percentage = 0.0

                redangerLists.append(
                    redangerList(
                        id=record.id,
                        name=record.title,
                        message=f"{record.content} {clamped_percentage:+.2f}%",
                        timestamp=record.time.strftime("%H:%M:%S")
                    ).to_dict()
                )

            except Exception as e:
                current_app.logger.error(f"处理redangerList记录出错: {str(e)}")

    # Assemble the JSON structure expected by the front end.
    return {
        "message": "Success",
        "code": 200,
        "data": {
            "dangerList": {
                "listCount": counter.to_dict(),
                "listData": list_data
            },
            "redangerList": redangerLists,
            "dangerChartData": {
                "timedata": time_list,
                "alldanger": total_count,
                "imdanger": important_count
            },
            "redangerChartData": measurement_data
        }
    }

@socketio.on('connect')
def handle_connect():
    """Log that a websocket client has connected."""
    print('Client connected')
    # Background task startup intentionally disabled; the push loop is
    # started from the 'client_server1' handler instead.
    # APP = current_app._get_current_object()
    # socketio.start_background_task(target=periodic_alert_task, APP=APP)

@socketio.on('disconnect')
def handle_disconnect():
    """Log that a websocket client has dropped the connection."""
    print('Client disconnected')

# Connectivity check: clients emit 'client_server1' to kick off the push loop.
@socketio.on('client_server1')
def periodic_alert_task(message):
    """Background loop that periodically pushes alert data to clients.

    Fixed: the docstring previously appeared *after* executable statements,
    making it a no-op string expression instead of the function's __doc__.

    Args:
        message: payload sent by the client with the 'client_server1' event;
            echoed to stdout for debugging.
    """
    print(message)
    app = current_app._get_current_object()
    with app.app_context():
        while True:
            try:
                sen_alert_periofically()  # the actual business function
            except Exception as e:
                # Report the failure to clients and keep the loop alive.
                error_msg = {"message": str(e), "code": 500}
                socketio.emit('alert_error1', json.dumps(error_msg))
                current_app.logger.exception("后台任务异常")
            socketio.sleep(10)

# Main sender: queries recent alerts, builds the chart series, and emits them.
# NOTE(review): the name has a typo ("periofically"); kept because callers
# (periodic_alert_task) reference it by this exact name.
def sen_alert_periofically():
    """Collect alert statistics and emit them on the 'alert_update1' event.

    Gathers unhandled ('alert'/'critical') alerts from the last 2 minutes,
    5-day daily totals/important counts, and per-measurement counts; formats
    everything with format_alert_data and emits the JSON payload. Does
    nothing when there are no recent alerts.
    """
    time_threshold = datetime.now() - timedelta(minutes=2)
    recent_alerts = db.session.query(alert_list).filter(
        alert_list.time >= time_threshold,
        alert_list.severity.in_(['alert', 'critical'])
    ).all()

    if not recent_alerts:
        current_app.logger.info("近期没有告警数据")
        return

    # 4 days ago through today gives a complete 5-day window. Compute the
    # date sequence once (it was previously rebuilt three times).
    five_days_ago = datetime.now() - timedelta(days=4)
    date_sequence = [five_days_ago.date() + timedelta(days=i) for i in range(5)]
    time_list = [day.strftime("%Y-%m-%d") for day in date_sequence]

    # Per-day totals plus count of important (alert/critical) rows.
    date_stats = db.session.query(
        db.func.date(alert_list.time).label('date'),
        db.func.count(alert_list.id).label('total'),
        db.func.sum(db.case(
            (alert_list.severity.in_(['alert', 'critical']), 1),
            else_=0
        )).label('important')
    ).filter(
        alert_list.time >= five_days_ago
    ).group_by(
        db.func.date(alert_list.time)
    ).order_by(
        db.func.date(alert_list.time).asc()
    ).all()

    current_app.logger.debug(f"原始统计结果: {date_stats}")

    # Fill gaps so every day in the window gets a data point.
    # NOTE(review): assumes db.func.date() yields keys comparable to
    # datetime.date objects — this is backend-dependent; verify for the DB
    # in use (some drivers return strings).
    daily_stats = {stat.date: {"total": stat.total, "important": stat.important} for stat in date_stats}
    total_count = []
    important_count = []
    for single_date in date_sequence:
        stats = daily_stats.get(single_date, {"total": 0, "important": 0})
        total_count.append(stats["total"])
        important_count.append(stats["important"])

    # Per-measurement alert counts over the same window (pie-chart data).
    measurement_stats = db.session.query(
        alert_list.measurement,
        db.func.count(alert_list.id).label('count')
    ).filter(
        alert_list.time >= five_days_ago
    ).group_by(
        alert_list.measurement
    ).all()

    # Build the chart rows directly; no intermediate dict is needed because
    # GROUP BY already guarantees one row per measurement.
    measurement_data = [
        {"name": measurement.capitalize(), "value": count}
        for measurement, count in measurement_stats
    ]

    formatted_data = format_alert_data(
        recent_alerts, time_list, total_count, important_count, measurement_data
    )

    socketio.emit(
        'alert_update1',
        json.dumps(formatted_data, default=str)
    )
