# -*- coding: utf-8 -*-
"""
独立线程：轮询 outbox → relay → 标记完成
"""
from __future__ import annotations

import json
import time
from threading import Thread, Event

from common.database import DatabaseManager
from common.logger import create_logger
from mqtt.consumer.domain.relay import MessageRelay
from mqtt.consumer.repository.outbox_repository import OutboxRepository

# Module-level logger; the "worker.outbox_relay" name groups all relay-thread logs.
logger = create_logger("worker.outbox_relay", "INFO")


class OutboxRelayThread(Thread):
	"""Daemon thread: poll outbox -> relay -> mark processed -> clean Redis key.

	Poll cadence is adaptive: ``interval`` is used while events keep arriving,
	``idle_interval`` after several consecutive empty polls. ``notify_new_event``
	(or ``stop``) wakes the thread immediately via an internal ``Event``, so a
	freshly written outbox row does not have to wait out a sleep.
	"""

	def __init__(self, db: DatabaseManager, relay: MessageRelay, interval: float = 0.5, idle_interval: float = 2):
		"""
		Args:
			db: database manager used for polling and per-batch transactions.
			relay: destination relay; receives (event_type, payload) pairs.
			interval: poll interval in seconds while events are flowing (500 ms).
			idle_interval: poll interval in seconds after repeated empty polls (2 s).
		"""
		super().__init__(name="OutboxRelay", daemon=True)
		self.db = db
		self.repo = OutboxRepository(db)
		self.relay = relay
		self.interval = interval  # busy poll interval
		self.idle_interval = idle_interval  # idle poll interval
		self._running = True
		self._consecutive_empty_polls = 0  # polls in a row that returned nothing
		self._wakeup_event = Event()  # lets notify_new_event()/stop() wake the loop at once
		# Best-effort Redis handle, used only to delete dedup keys after relaying.
		self._redis = None
		try:
			import common.redis
			self._redis = common.redis.get_redis_service()
			if self._redis and self._redis.is_available():
				logger.info("OutboxRelay已连接Redis，支持key清理")
			else:
				logger.warning("OutboxRelay未连接Redis，无法清理key")
		except Exception as e:
			logger.warning(f"OutboxRelay连接Redis失败: {e}")

	def stop(self):
		"""Request shutdown and wake the loop so it exits promptly."""
		self._running = False
		self._wakeup_event.set()

	def notify_new_event(self):
		"""Signal that a new outbox row exists; wakes the polling loop immediately."""
		self._wakeup_event.set()

	def run(self):
		"""Main loop: poll for unprocessed rows, relay each batch in a transaction."""
		logger.info("OutboxRelay 线程启动")
		while self._running:
			try:
				rows = self._poll_rows()

				if not rows:
					self._consecutive_empty_polls += 1
					# Stay on the short interval for the first few empty polls,
					# then back off to the idle interval.
					if self._consecutive_empty_polls <= 3:
						sleep_time = self.interval
					else:
						sleep_time = self.idle_interval
					# Event.wait() instead of time.sleep(): interruptible by notify/stop.
					self._wakeup_event.wait(sleep_time)
					self._wakeup_event.clear()
					continue

				self._consecutive_empty_polls = 0

				# Open a transaction only when there is actually work to do.
				with self.db.transaction():
					for row in rows:
						self._handle_row(row)

				# After a batch, keep polling on the short interval.
				self._wakeup_event.wait(self.interval)
				self._wakeup_event.clear()
			except Exception as e:
				logger.exception("轮询异常: %s", e)
				self._wakeup_event.wait(self.interval)  # short retry interval on error
				self._wakeup_event.clear()
		logger.info("OutboxRelay 线程退出")

	def _reconnect_and_refresh(self) -> None:
		"""Reconnect and run an empty transaction so the session sees fresh data.

		The START TRANSACTION / COMMIT pair refreshes the connection's snapshot
		(presumably REPEATABLE READ isolation — matches the original's intent).
		"""
		self.db.reconnect()
		self.db.execute_query("START TRANSACTION", commit=False)
		self.db.execute_query("COMMIT", commit=False)

	def _poll_rows(self):
		"""Return the next batch of unprocessed outbox rows (possibly empty).

		Emits diagnostic logs about pending ids, and retries once after a
		reconnect if COUNT(*) reports pending rows but the poll comes back empty.
		"""
		# Reconnect first if the link has dropped since the last iteration.
		if not self.db.is_connected():
			self._reconnect_and_refresh()

		total_count_sql = "SELECT COUNT(*) as total FROM am_outbox WHERE processed=0"
		total_result = self.db.execute_query(total_count_sql, commit=False)
		total_unprocessed = total_result[0]['total'] if total_result else 0

		if not total_unprocessed:
			return []

		# Diagnostics: list up to 10 pending event ids before polling.
		id_list_sql = "SELECT id, event_type, alarm_id, meter_code FROM am_outbox WHERE processed=0 ORDER BY id ASC LIMIT 10"
		id_list_result = self.db.execute_query(id_list_sql, commit=False)
		if id_list_result:
			id_info = ', '.join([f"id={row['id']}({row['event_type']}, alarm_id={row['alarm_id']})" for row in id_list_result])
			logger.info(f"[DIAG-OutboxRelay] 未处理事件列表: {id_info}")

		rows = self.repo.poll_unprocessed(50)
		if not rows:
			# COUNT(*) says there is work but the poll saw none: likely a stale
			# snapshot. Reconnect, refresh, and retry once.
			logger.warning(
				f"[DIAG-OutboxRelay] 查询异常：数据库显示有 {total_unprocessed} 个事件，但查询返回0条")
			logger.info("[DIAG-OutboxRelay] 尝试重新连接数据库并重试查询")
			self._reconnect_and_refresh()
			rows = self.repo.poll_unprocessed(50)
			if rows:
				logger.info(f"[DIAG-OutboxRelay] 重试后查询成功，获得 {len(rows)} 个事件")
			else:
				logger.error(f"[DIAG-OutboxRelay] 重试后仍然查询失败，跳过本次轮询")

		if rows:
			logger.info(f"[DIAG-OutboxRelay] 处理事件: {len(rows)}个，总数: {total_unprocessed}")
		return rows

	def _handle_row(self, row) -> None:
		"""Relay one outbox row, mark it processed, and drop its Redis dedup key.

		A failure is logged but does not abort the batch: the row is left
		unprocessed and will be picked up again on a later poll.
		"""
		try:
			payload = json.loads(row["payload_json"]) if isinstance(row["payload_json"], str) else row[
				"payload_json"]

			# Ensure meter_code is present in the payload, preferring the DB column.
			db_meter_code = row.get("meter_code")
			payload_meter_code = payload.get("meter_code")

			if db_meter_code and not payload_meter_code:
				payload["meter_code"] = db_meter_code
			elif not payload_meter_code:
				logger.warning(f"[事件{row['id']}] 未找到 meter_code，事件类型: {row['event_type']}")

			alarm_type = payload.get('alarm_type', 'UNKNOWN')
			logger.info(f"[OutboxRelay] 处理事件: id={row['id']}, type={row['event_type']}, alarm_type={alarm_type}, meter_code={db_meter_code}")

			self.relay.relay(row["event_type"], payload)
			self.repo.mark_processed(row["id"])

			# Delete the dedup key only after the row is processed, so a
			# concurrent producer cannot slip a duplicate through the gap.
			self._cleanup_redis_key(row["fingerprint"], row["event_type"], alarm_type)

		except Exception as e:
			logger.error(f"[事件{row['id']}] 处理失败: {e}", exc_info=True)

	def _cleanup_redis_key(self, fingerprint: str, event_type: str, alarm_type: str) -> None:
		"""Delete the dedup key for a processed event from Redis (best effort).

		Args:
			fingerprint: event fingerprint embedded in the key name.
			event_type: event type (used for diagnostics only).
			alarm_type: alarm type; TAMPER alarms get extra logging.
		"""
		if not self._redis or not self._redis.is_available():
			return

		try:
			key = f"outbox:processed:{fingerprint}"
			self._atomic_cleanup_key(key, fingerprint, event_type, alarm_type)

		except Exception as e:
			logger.error(f"[DIAG-清理Redis] 清理key异常: {e}", exc_info=True)
			# TAMPER cleanup failures are operationally significant — log loudly.
			if alarm_type and 'TAMPER' in alarm_type:
				logger.error(f"[DIAG-清理Redis] TAMPER告警key清理异常: {alarm_type}")

	def _atomic_cleanup_key(self, key: str, fingerprint: str, event_type: str, alarm_type: str) -> None:
		"""Atomically delete a Redis key via a Lua script, with bounded retries.

		Args:
			key: full Redis key to delete.
			fingerprint: event fingerprint (kept for interface compatibility).
			event_type: event type (used by the verification step).
			alarm_type: alarm type; TAMPER deletions are logged individually.
		"""
		# Lua script makes EXISTS+DEL a single atomic server-side operation.
		lua_script = """
		local key = KEYS[1]
		local exists = redis.call('EXISTS', key)
		if exists == 1 then
			local ttl = redis.call('TTL', key)
			local deleted = redis.call('DEL', key)
			return {deleted, ttl}
		else
			return {0, -2}
		end
		"""

		max_retries = 2
		for attempt in range(max_retries + 1):
			try:
				result = self._redis.eval_lua_script(lua_script, [key], [])
				deleted = result[0] if result else 0

				# Only TAMPER cleanups are worth logging individually.
				if deleted > 0 and alarm_type and 'TAMPER' in alarm_type:
					logger.info(f"[DIAG-清理Redis] TAMPER key已清理: {alarm_type}")
				break

			except Exception as retry_error:
				if attempt >= max_retries:
					raise
				logger.warning(f"[DIAG-清理Redis] 删除key失败，将重试: {retry_error}")
				time.sleep(0.1 * (attempt + 1))  # linear backoff: 0.1s then 0.2s

		# Double-check that the key really is gone.
		self._verify_cleanup_result(key, event_type, alarm_type)

	def _verify_cleanup_result(self, key: str, event_type: str, alarm_type: str) -> None:
		"""Verify the key was deleted; force-delete and log if it still exists."""
		try:
			key_exists_final = self._redis.exists(key)
			if key_exists_final:
				ttl_final = self._redis.ttl(key)
				logger.warning(f"[DIAG-清理Redis] 清理验证失败，key仍存在: TTL={ttl_final}")
				# A lingering TAMPER key would let duplicates be suppressed — retry hard.
				if alarm_type and 'TAMPER' in alarm_type:
					logger.error(f"[DIAG-清理Redis] TAMPER key清理失败: {alarm_type}")
					final_deleted = self._redis.delete(key)
					logger.info(f"[DIAG-清理Redis] TAMPER key强制清理结果: deleted={final_deleted}")
		except Exception as e:
			logger.error(f"[DIAG-清理Redis] 验证清理结果异常: {e}")
