import threading
import time
import logging
import asyncio
import aiohttp
from concurrent.futures import ThreadPoolExecutor
from django_redis import get_redis_connection
from django.utils import timezone
from django.conf import settings
from .xiaozhi_bot import xiaozhi

# Module-level logger named after this module's dotted import path.
logger = logging.getLogger(__name__)


class QdasMonitor:
    """Background monitor for QDAS measurement features.

    Periodically scans every ``EphmQdasFeature`` in batches, fetches the
    latest measurement for each, and when a value is out of tolerance it
    persists an ``EphmLimitsWarning`` row and pushes an alert message.
    Redis is used for de-duplication, exceedance counting, and alert
    rate limiting. Zone groups are processed in parallel on a thread pool.
    """

    def __init__(self):
        # getattr guards against the setting not being declared at all;
        # ``or`` preserves the original fallback for falsy (None/0) values.
        self.check_interval = getattr(settings, "QDAS_MONITOR_INTERVAL", None) or 60  # seconds between scans
        self.batch_size = getattr(settings, "QDAS_BATCH_SIZE", None) or 100  # features fetched per DB batch
        self.thread_pool_size = getattr(settings, "QDAS_THREAD_POOL_SIZE", None) or 5  # parallel zone groups
        self.redis_conn = get_redis_connection("qdas_monitor")
        self.thread_pool = ThreadPoolExecutor(max_workers=self.thread_pool_size)

    def start(self):
        """Start the monitoring loop in a daemon thread (non-blocking)."""
        monitor_thread = threading.Thread(target=self.run_monitoring, daemon=True)
        monitor_thread.start()
        logger.info(f"Qdas 监控线程已启动，循环周期：{self.check_interval}秒，批处理大小：{self.batch_size}，线程池大小：{self.thread_pool_size}")

    def run_monitoring(self):
        """Run the monitoring loop forever: one full scan per interval.

        Any exception from a scan is logged and swallowed so the loop
        never dies; the sleep runs even after a failed scan.
        """
        logger.info("Qdas 启动循环...")
        while True:
            try:
                self.monitor_all_features()
            except Exception as e:
                logger.error(f"监控过程中出现错误: {e}", exc_info=True)
            time.sleep(self.check_interval)

    def monitor_all_features(self):
        """Scan all features in batches and process zone groups in parallel."""
        from .models import EphmQdasFeature

        total_features = EphmQdasFeature.objects.count()
        logger.info(f"开始监控所有特征点，总数: {total_features}")

        # Explicit ordering makes offset pagination deterministic; slicing
        # an unordered queryset may skip or repeat rows across batches.
        queryset = EphmQdasFeature.objects.order_by("pk")
        for offset in range(0, total_features, self.batch_size):
            features = list(queryset[offset : offset + self.batch_size])
            if not features:
                break

            # Fan each zone group out to the pool, then wait for the batch;
            # future.result() re-raises any worker exception here.
            futures = [
                self.thread_pool.submit(self._process_feature_group, group)
                for group in self._group_features(features)
            ]
            for future in futures:
                future.result()

        logger.info("完成所有特征点监控")

    def _group_features(self, features):
        """Group features by zone so each zone can be processed in parallel.

        Returns a list of lists, preserving first-seen zone order and the
        original feature order within each zone.
        """
        groups = {}
        for feature in features:
            groups.setdefault(feature.zone, []).append(feature)
        return list(groups.values())

    def _process_feature_group(self, features):
        """Process one zone's group: fetch data, check status, alert."""
        try:
            zone = features[0].zone  # all features in a group share one zone
            latest_data_map = self._batch_get_latest_data(features, zone)
            status_map = self._batch_check_feature_status(features, latest_data_map)

            for feature in features:
                try:
                    # Skip features that have no fresh data or whose status
                    # check failed (guarding status_map avoids a KeyError).
                    if feature.id not in latest_data_map or feature.id not in status_map:
                        continue

                    latest_data = latest_data_map[feature.id]
                    feature_status = status_map[feature.id]

                    # Unique identifier per measurement, used to avoid
                    # saving the same datum twice.
                    data_identifier = f"{latest_data['wvdatzeit']}_{latest_data['wvwert']}"
                    data_key = f"qdas_data:{feature.zone}:{feature.op}:{feature.feature}:{data_identifier}"

                    # CS/OP50 features follow dedicated alert rules.
                    if feature.zone == "CS" and feature.op == "OP50":
                        self._process_special_feature(feature, latest_data, feature_status, data_key)
                    elif feature_status in ["exceed_upper", "exceed_lower"]:
                        # NOTE: this call also refreshes the "recent data"
                        # marker in Redis, so it must run before counting.
                        is_duplicate = self._check_duplicate_data(feature, latest_data)

                        # Persist the out-of-tolerance record once per datum.
                        if not self.redis_conn.exists(data_key):
                            self.save_qdas_warning(feature, latest_data, data_key)

                        count_key = f"exceed_count:{feature.zone}:{feature.op}:{feature.feature}"
                        if not is_duplicate:
                            current_count = self.redis_conn.incr(count_key)
                            if current_count == 1:
                                # First exceedance: expire after 24h
                                # (counter is reset when a value passes).
                                self.redis_conn.expire(count_key, 24 * 60 * 60)
                        else:
                            # Duplicate datum: read the counter without bumping it.
                            current_count = int(self.redis_conn.get(count_key) or 0)

                        warning_key = f"limit_warning:{feature.zone}:{feature.op}:{feature.feature}"
                        last_alert_key = f"last_alert:{feature.zone}:{feature.op}:{feature.feature}"
                        # asyncio.run is safe here: pool worker threads have
                        # no running event loop of their own.
                        asyncio.run(self.check_and_send_alert_async(feature, latest_data, self.redis_conn, warning_key, count_key, last_alert_key, current_count, feature_status))
                    else:
                        # In-tolerance datum: reset the exceedance counter.
                        count_key = f"exceed_count:{feature.zone}:{feature.op}:{feature.feature}"
                        self.redis_conn.delete(count_key)

                except Exception as e:
                    logger.error(f"处理特征{feature.zone}/{feature.op}/{feature.feature} 时出现错误: {e}", exc_info=True)

        except Exception as e:
            logger.error(f"处理特征组时出现错误: {e}", exc_info=True)

    def _process_special_feature(self, feature, latest_data, feature_status, data_key):
        """Apply the dedicated CS/OP50 alert rules to one measurement.

        Rule 1: a single value more than 75 beyond either limit alerts
        immediately. Rule 2: two or more recorded exceedances alert at
        most once per hour. A passing value clears the rule-2 state.
        """
        wvwert = latest_data["wvwert"]
        up_limit = feature.up_limit
        low_limit = feature.low_limit
        zone_op_feature = f"{feature.zone}:{feature.op}:{feature.feature}"

        # Outside the nominal tolerance band (rule-2 precondition).
        is_out_of_tolerance = (up_limit is not None and wvwert > up_limit) or (low_limit is not None and wvwert < low_limit)

        # Rule-2 Redis state.
        exceed_list_key = f"exceed_list:{zone_op_feature}"
        last_alert_rule2_key = f"last_alert_rule2:{zone_op_feature}"  # timestamp of the last rule-2 alert
        is_rule2_alert = False

        if is_out_of_tolerance:
            # Record the exceedance, keeping only the 10 most recent events.
            timestamp = latest_data["wvdatzeit"]
            self.redis_conn.rpush(exceed_list_key, f"{timestamp}_{wvwert}")
            self.redis_conn.ltrim(exceed_list_key, -10, -1)

            # Rule 2 fires on >=2 recorded exceedances...
            list_length = self.redis_conn.llen(exceed_list_key)
            if list_length >= 2:
                # ...and only outside the 1-hour cool-down window.
                current_time = int(time.time())
                last_alert_time = self.redis_conn.get(last_alert_rule2_key)

                if not last_alert_time or (current_time - int(last_alert_time)) >= 3600:
                    is_rule2_alert = True
                    self.redis_conn.setex(last_alert_rule2_key, 3600, current_time)  # start a new cool-down

            # Send the rule-2 alert (rate limit satisfied, datum not yet saved).
            if is_rule2_alert and not self.redis_conn.exists(data_key):
                self.save_qdas_warning(feature, latest_data, data_key)
                asyncio.run(self.send_special_alert(feature, latest_data, "rule2", wvwert, up_limit, low_limit))
        else:
            # A passing value clears the exceedance history and cool-down.
            self.redis_conn.delete(exceed_list_key)
            self.redis_conn.delete(last_alert_rule2_key)

        # Rule 1: single value more than 75 beyond either limit.
        is_rule1_alert = (up_limit is not None and wvwert > (up_limit + 75)) or (low_limit is not None and wvwert < (low_limit - 75))

        if is_rule1_alert and not self.redis_conn.exists(data_key):
            self.save_qdas_warning(feature, latest_data, data_key)
            asyncio.run(self.send_special_alert(feature, latest_data, "rule1", wvwert, up_limit, low_limit))

    async def send_special_alert(self, feature, latest_data, rule_type, value, up_limit, low_limit):
        """Format and send the alert message for a special (CS/OP50) rule.

        ``rule_type`` is "rule1" (severe single-value exceedance) or
        "rule2" (repeated exceedances). Unknown rule types send an empty
        message, matching the original behavior.
        """
        message = ""
        if rule_type == "rule1":
            message = (
                f"【QDAS监控警报】{feature.feature_cn}\n"
                f"区域/工序：{feature.zone}/{feature.op}\n"
                f"当前值：{value}\n"
                f"控制范围：{low_limit}~{up_limit}\n"
                f"状态：<b>严重超差，请立即处理！</b>\n"
                f"时间：{latest_data['wvdatzeit']}\n"
                f"======================"
            )
        elif rule_type == "rule2":
            message = (
                f"【QDAS监控警报】{feature.feature_cn}\n"
                f"区域/工序：{feature.zone}/{feature.op}\n"
                f"当前值：{value}\n"
                f"控制范围：{low_limit}~{up_limit}\n"
                f"状态：<b>10件内多次超差，请关注！</b>\n"
                f"时间：{latest_data['wvdatzeit']}\n"
                f"======================"
            )

        # The blocking bot client runs off the event loop thread. (The
        # previous aiohttp session opened here was never used and was
        # removed — delivery goes through xiaozhi, not HTTP from here.)
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, xiaozhi.send_message, message)  # quality-info push group

        logger.info(f"为 {feature.zone}/{feature.op}/{feature.feature} 发送了特殊超差告警，规则：{rule_type}")

    def _batch_get_latest_data(self, features, zone):
        """Fetch the latest QDAS datum for each feature.

        Returns ``{feature.id: data_dict}``; features with no data are
        omitted. Errors are handled per feature so one failure does not
        drop the rest of the batch.
        """
        from .views import QdasHelper

        latest_data_map = {}
        for feature in features:
            try:
                data = QdasHelper.get_latest_qdas_data(wvteil=feature.teil, wvmerkmal=feature.merkmal, zone=zone)
                if data:
                    latest_data_map[feature.id] = data
            except Exception as e:
                logger.error(f"批量获取数据时出现错误: {e}", exc_info=True)

        return latest_data_map

    def _batch_check_feature_status(self, features, latest_data_map):
        """Evaluate the tolerance status of each feature that has data.

        Returns ``{feature.id: status_string}``. Errors are handled per
        feature so one failure does not drop the rest of the batch.
        """
        from .views import QdasHelper

        status_map = {}
        for feature in features:
            if feature.id not in latest_data_map:
                continue
            try:
                status = QdasHelper.check_feature_status(feature, latest_data_map[feature.id])
                status_map[feature.id] = status
            except Exception as e:
                logger.error(f"批量检查特征状态时出现错误: {e}", exc_info=True)

        return status_map

    def _check_duplicate_data(self, feature, latest_data):
        """Return True if this datum equals the last one seen for the feature.

        Side effect: always refreshes the "recent data" marker in Redis
        (24h TTL) with the current datum's identifier.
        """
        recent_data_key = f"recent_data:{feature.zone}:{feature.op}:{feature.feature}"
        recent_data = self.redis_conn.get(recent_data_key)

        # Compare against the stored identifier, then update the marker.
        current_identifier = f"{latest_data['wvdatzeit']}_{latest_data['wvwert']}"
        is_duplicate = bool(recent_data) and recent_data.decode("utf-8") == current_identifier
        self.redis_conn.setex(recent_data_key, 24 * 60 * 60, current_identifier)

        return is_duplicate

    def save_qdas_warning(self, feature, latest_data, data_key):
        """Persist an out-of-tolerance record and mark it saved in Redis."""
        from .models import EphmLimitsWarning

        try:
            EphmLimitsWarning.objects.create(
                zone=feature.zone,
                op=feature.op,
                item=feature.feature,
                item_cn=feature.feature_cn,
                item_level=feature.level,
                value=latest_data["wvwert"],
                up_limit=feature.up_limit,
                low_limit=feature.low_limit,
                ephm_measures="",
                person_in_charge="",
                timestamp=latest_data["wvdatzeit"],
                create_time=timezone.now(),
            )

            # Mark this datum as saved (24h TTL) to block duplicate rows.
            self.redis_conn.setex(data_key, 24 * 60 * 60, "1")

            logger.info(f"保存了 {feature.zone}/{feature.op}/{feature.feature} 的超差数据")

        except Exception as e:
            logger.error(f"保存{feature.zone}/{feature.op}/{feature.feature} 的超差数据时出现错误: {e}", exc_info=True)

    async def check_and_send_alert_async(self, feature, latest_data, redis_conn, warning_key, count_key, last_alert_key, current_count, status):
        """Send an exceedance alert if the count and cool-down allow it.

        Alerts only when ``current_count`` reached ``feature.level`` and
        the last alert for this feature was more than an hour ago.
        """
        try:
            # Not enough consecutive exceedances yet.
            if current_count < feature.level:
                return

            # Still inside the 1-hour cool-down window?
            last_alert_time = redis_conn.get(last_alert_key)
            if last_alert_time:
                now = int(time.time())
                if (now - int(last_alert_time)) < 3600:
                    return

            # Record the alert time (24h TTL as garbage collection).
            redis_conn.setex(last_alert_key, 24 * 60 * 60, int(time.time()))

            # The session argument is unused by the sender (the previous
            # aiohttp session opened here was never used and was removed).
            await self._send_message_async(None, feature, latest_data, current_count)

            logger.info(f"为 {feature.zone}/{feature.op}/{feature.feature} 发送了超差告警，次数：{current_count}")

        except Exception as e:
            logger.error(f"发送 {feature.zone}/{feature.op}/{feature.feature} 的告警消息时出现错误: {e}", exc_info=True)

    async def _send_message_async(self, session, feature, latest_data, current_count):
        """Format and send the standard exceedance alert message.

        ``session`` is unused and kept only for call compatibility;
        delivery goes through the blocking xiaozhi client in an executor.
        """
        loop = asyncio.get_running_loop()
        message = (
            f"【QDAS监控警报】{feature.feature_cn}\n"
            f"区域/工序：{feature.zone}/{feature.op}\n"
            f"当前值：{latest_data['wvwert']}\n"
            f"控制范围：{feature.low_limit}~{feature.up_limit}\n"
            f"状态：<b>连续超差次数：{current_count}/{feature.level},请关注！</b>\n"
            f"时间：{latest_data['wvdatzeit']}\n"
            f"======================"
        )

        await loop.run_in_executor(None, xiaozhi.send_message, message)
