import paho.mqtt.client as mqtt
import json
import pymysql
from datetime import datetime
import atexit
import logging
import threading
from collections import defaultdict
import time
import pandas as pd
import queue
import os
import configparser
from typing import Dict, List, Tuple

# ------------------------ 配置管理 ------------------------
class ConfigManager:
    """Central access point for every section of ``config.ini``."""

    _CONFIG_PATH = 'MqttCapacityTest/config.ini'

    def __init__(self):
        parser = configparser.ConfigParser()
        parser.read(self._CONFIG_PATH)
        self.config = parser

    @property
    def mqtt_config(self) -> Dict:
        """The raw [MQTT] section as a plain dict."""
        return dict(self.config['MQTT'])

    @property
    def db_config(self) -> Dict:
        """The raw [DATABASE] section as a plain dict."""
        return dict(self.config['DATABASE'])

    @property
    def app_config(self) -> Dict:
        """The [APP] section with values coerced to their proper types."""
        app = self.config['APP']
        return {
            'log_dir': app['LogDir'],
            'batch_interval': int(app['BatchInterval']),
            'batch_size': int(app['BatchSize']),
            'export_interval': int(app['ExportInterval']),
            'device_ids': json.loads(app['DeviceIDs']),
        }

# ------------------------ 日志管理 ------------------------
class LogManager:
    """Log configuration and file management.

    Creates the log directory if needed and configures a shared logger
    with one timestamped file handler and one console handler.
    """

    def __init__(self, log_dir: str):
        self.log_dir = log_dir
        self._ensure_directory()
        self.logger = self._configure_logger()

    def _ensure_directory(self):
        """Create the log directory on first use."""
        if not os.path.exists(self.log_dir):
            # exist_ok guards against a race with another process creating it
            os.makedirs(self.log_dir, exist_ok=True)
            print(f'Created log directory: {self.log_dir}')

    def _configure_logger(self):
        """Build (or rebuild) the shared logger.

        BUG FIX: ``logging.getLogger(__name__)`` returns the same logger on
        every call, so repeated LogManager construction used to stack
        duplicate handlers and emit every record multiple times.  Any
        handlers left from a previous configuration are now closed and
        removed before new ones are attached.
        """
        log_file = os.path.join(
            self.log_dir,
            f"{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}_Mqgateway_monitor.log"
        )

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO)

        # Drop stale handlers so reconfiguration is idempotent.
        for handler in list(logger.handlers):
            handler.close()
            logger.removeHandler(handler)

        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)

        return logger

# ------------------------ 设备状态管理 ------------------------
class DeviceStatsManager:
    """Thread-safe per-device message statistics and batch buffering.

    Valid position updates are cached per device and pushed onto the global
    ``process_queue`` once ``batch_size`` entries have accumulated.
    """

    def __init__(self, batch_size: int):
        self.start_time = datetime.now()
        self.batch_size = batch_size
        # BUG FIX: must be re-entrant — add_valid_update() calls
        # _flush_device() while already holding the lock, which deadlocked
        # with the previous non-reentrant threading.Lock on the first
        # full batch.
        self.lock = threading.RLock()
        self._init_counters()

    def _init_counters(self):
        """Reset all caches and counters to their empty state."""
        self.update_cache = defaultdict(list)        # device_id -> [(x, y, z, ts), ...]
        self.total_message_count = defaultdict(int)  # every message seen per device
        self.valid_message_count = defaultdict(int)  # messages with usable coordinates

    def add_total_count(self, device_id: str):
        """Count one received message for *device_id* (valid or not)."""
        with self.lock:
            self.total_message_count[device_id] += 1

    def add_valid_update(self, device_id: str, x: float, y: float, z: float):
        """Cache a valid coordinate update; flush when the batch is full."""
        with self.lock:
            self.update_cache[device_id].append((x, y, z, datetime.now()))
            self.valid_message_count[device_id] += 1
            if len(self.update_cache[device_id]) >= self.batch_size:
                self._flush_device(device_id)

    def _flush_device(self, device_id: str):
        """Move the device's cached updates onto the global process queue."""
        with self.lock:
            if device_id in self.update_cache:
                process_queue.put((device_id, self.update_cache.pop(device_id)))

    def get_stats(self) -> Dict:
        """Return a snapshot of runtime and per-device message counters."""
        with self.lock:
            # Single timestamp so end_time and total_runtime agree exactly.
            now = datetime.now()
            return {
                "total_runtime": now - self.start_time,
                "start_time": self.start_time,
                "end_time": now,
                "total_message_counts": dict(self.total_message_count),
                "valid_message_counts": dict(self.valid_message_count)
            }

    def reset_valid_counters(self):
        """Clear only the valid-update counters (totals keep accumulating)."""
        with self.lock:
            self.valid_message_count.clear()

# ------------------------ 数据库管理 ------------------------
class DatabaseManager:
    """MySQL connection management and batched upsert operations."""

    def __init__(self, config: Dict):
        self.config = config
        self.conn = self._create_connection()

    def _create_connection(self):
        """Open a new pymysql connection from the stored config."""
        return pymysql.connect(**self.config)

    def health_check(self):
        """Return True if the connection is alive, attempting one reconnect."""
        try:
            with self.conn.cursor() as cursor:
                cursor.execute("SELECT 1")
            return True
        except pymysql.OperationalError:
            try:
                self.conn.ping(reconnect=True)  # force an active reconnect
                return True
            except Exception as e:
                logger.error(f"Database reconnection failed: {str(e)}")
                return False

    # BUG FIX: this method was accidentally defined at module level (not
    # indented inside the class) while still taking `self`, so the call
    # db_manager.batch_update(...) raised AttributeError at runtime.
    def batch_update(self, device_id: str, updates: List[Tuple]):
        """Batch-upsert coordinate rows; insert when the row does not exist.

        Rows containing NaN coordinates are dropped before writing.
        Returns the number of rows written; rolls back and re-raises on
        database errors.
        """
        # Reshape tuples to (device_id, x, y, z, update_time)
        valid_updates = [
            (f"GW{device_id}", x, y, z, ts)
            for (x, y, z, ts) in updates
            if not any(pd.isna(val) for val in [x, y, z])
        ]

        if not valid_updates:
            return 0

        try:
            if not self.health_check():
                raise pymysql.OperationalError("Connection lost")

            with self.conn.cursor() as cursor:
                # Upsert via ON DUPLICATE KEY UPDATE
                cursor.executemany(
                    "INSERT INTO map_device (device_id, x, y, z, update_time) "
                    "VALUES (%s, %s, %s, %s, %s) "
                    "ON DUPLICATE KEY UPDATE "
                    "x = VALUES(x), y = VALUES(y), z = VALUES(z), update_time = VALUES(update_time)",
                    valid_updates
                )
                self.conn.commit()
            return len(valid_updates)
        except Exception:
            self.conn.rollback()
            raise  # bare raise preserves the original traceback

# ------------------------ MQTT 客户端管理 ------------------------
class MQTTClientManager:
    """MQTT client lifecycle: connection, auto-reconnect and message parsing."""

    def __init__(self, config: Dict, device_ids: List[str]):
        self.client = mqtt.Client()
        self.device_ids = device_ids
        self._connected = False
        self._configure_client(config)

    def _configure_client(self, config: Dict):
        """Wire callbacks, enable auto-reconnect and connect asynchronously."""
        self.client.on_connect = self._on_connect
        # BUG FIX: on_message was previously assigned twice; once is enough.
        self.client.on_message = self._on_message
        self.client.on_disconnect = self._on_disconnect

        # Auto-reconnect backoff: 1 s minimum, 120 s maximum.
        self.client.reconnect_delay_set(min_delay=1, max_delay=120)

        self.client.connect_async(  # async connect so startup never blocks
            config['host'],
            int(config['port']),
            int(config['keepalive'])
        )
        self.client.loop_start()  # background network thread

    def _on_disconnect(self, client, userdata, rc):
        """Track connection loss; paho's auto-reconnect handles the rest."""
        self._connected = False
        if rc == 0:
            logger.info("MQTT正常断开")
        else:
            logger.warning(f"MQTT意外断开，正在尝试重连 (错误码: {rc})")

    def _on_connect(self, client, userdata, flags, rc):
        """Connection callback.

        BUG FIX: the connected flag is now set only when ``rc`` indicates
        success; previously it was set True even for failed connections,
        so connection_status() lied after a refused connect.
        """
        if rc == mqtt.MQTT_ERR_SUCCESS:
            self._connected = True
            logger.info("MQTT连接成功！服务端返回码: 0")
            client.subscribe("#")
            logger.debug("已重新订阅所有主题")
        else:
            self._connected = False
            error_codes = {
                1: "不支持的协议版本",
                2: "客户端标识无效",
                3: "服务器不可用",
                4: "用户名/密码错误",
                5: "未授权"
            }
            error_msg = error_codes.get(rc, f"未知错误 ({rc})")
            logger.error(f"MQTT连接失败: {error_msg}")

    def connection_status(self) -> bool:
        """Current connection flag, maintained by the connect/disconnect callbacks."""
        return self._connected

    def _on_message(self, client, userdata, msg):
        """Parse one incoming message and record the device's coordinates.

        Malformed payloads are silently ignored on purpose: at capacity-test
        message rates, logging every bad frame would flood the log.
        """
        try:
            device_id = self._match_device_id(msg.topic)
            if not device_id:
                return

            stats_manager.add_total_count(device_id)

            position = json.loads(msg.payload.decode()).get('position', {})
            x, y, z = self._parse_coordinates(position, device_id)

            stats_manager.add_valid_update(device_id, x, y, z)
            logger.debug(f"Valid coordinates: {device_id} - X:{x:.2f}, Y:{y:.2f}, Z:{z:.2f}")

        except (json.JSONDecodeError, KeyError, ValueError):
            pass  # expected noise from malformed payloads — see docstring
        except Exception:
            pass  # never let a callback exception kill the network thread

    def _match_device_id(self, topic: str) -> str:
        """Return the configured device id embedded in *topic*, or "" if none."""
        topic_lower = topic.lower()
        for dev_id in self.device_ids:
            dev = dev_id.lower()
            if f"/{dev}/" in topic_lower or f"gw{dev}/" in topic_lower:
                return dev_id
        return ""

    def _parse_coordinates(self, position: Dict, device_id) -> Tuple[float, float, float]:
        """Validate and convert a position dict to an (x, y, z) float tuple.

        Raises ValueError for missing keys or NaN values.
        BUG FIX: a missing key used to be swallowed (bare ``pass``), making
        the method return None and the caller crash on tuple unpacking.
        """
        try:
            x = float(position['x'])
            y = float(position['y'])
            z = float(position['z'])
        except KeyError as e:
            raise ValueError(f"{device_id}: missing coordinate {str(e)}")
        if any(pd.isna(val) for val in [x, y, z]):
            raise ValueError(f"{device_id}: NaN values detected")
        return x, y, z

# ------------------------ 核心业务逻辑 ------------------------
MAX_RETRIES = 3  # maximum re-queue attempts per failed batch

def batch_processor(db_manager: DatabaseManager):
    """Worker loop: pull batches off the queue and write them to the DB.

    Producers put (device_id, updates) 2-tuples; failed batches are
    re-queued as (device_id, updates, retry_count) 3-tuples so the retry
    limit survives the round-trip through the queue.

    BUG FIX: the retry counter was previously reset to 0 on every dequeue,
    so ``retry_count < MAX_RETRIES`` was always true and a persistently
    failing batch retried forever.
    """
    while True:
        try:
            item = process_queue.get(timeout=config.app_config['batch_interval'])
        except queue.Empty:
            # Idle timeout: drain anything still pending.
            flush_pending_updates()
            continue

        # Accept both producer 2-tuples and re-queued 3-tuples.
        device_id, updates, *rest = item
        retry_count = rest[0] if rest else 0

        try:
            count = db_manager.batch_update(device_id, updates)
            logger.info(f"Batch update success: {device_id} ({count} records)")
        except pymysql.err.DataError as e:
            # Data error (e.g. range overflow): retrying cannot help.
            logger.error(f"Data error (device {device_id}): {str(e)}, discarding updates")
        except pymysql.err.OperationalError:
            # Connection trouble: re-queue with an incremented retry count.
            if retry_count < MAX_RETRIES:
                logger.warning(f"Operational error (device {device_id}), retrying... ({retry_count + 1}/{MAX_RETRIES})")
                process_queue.put((device_id, updates, retry_count + 1))
            else:
                logger.error(f"Max retries exceeded for device {device_id}, discarding updates")
        except Exception as e:
            logger.error(f"Unexpected error: {str(e)}")
            # Cap unexpected-error retries too, or a poison batch loops forever.
            if retry_count < MAX_RETRIES:
                process_queue.put((device_id, updates, retry_count + 1))
            
def flush_pending_updates():
    """Drain the process queue, discarding any still-pending batches.

    Last-resort cleanup (used at exit and on idle timeouts): the data is
    dropped, not written to the database.

    BUG FIX: the warning used to be logged unconditionally — including on
    every idle timeout of the batch processor when the queue was already
    empty — spamming the log.  It is now emitted only when batches were
    actually discarded, and includes the count.
    """
    discarded = 0
    while True:
        try:
            process_queue.get_nowait()
            discarded += 1
        except queue.Empty:
            break
    if discarded:
        logger.warning(f"Emergency queue flush completed ({discarded} batches discarded)")
            
def export_stats():
    """Log a runtime summary and append per-device stats to the CSV file.

    Valid-update counters are reset after a successful export so each CSV
    row reflects only the last interval; failures are logged, not raised.
    """
    try:
        snapshot = stats_manager.get_stats()
        logger.info("\n运行统计:")
        logger.info(f"开始时间: {snapshot['start_time']}")
        logger.info(f"结束时间: {snapshot['end_time']}")
        logger.info(f"总运行时间: {snapshot['total_runtime']}")
        valid_counts = snapshot.get('valid_message_counts', {})
        logger.info(f"有效: {valid_counts}次")
        _save_to_csv(_prepare_stats_dataframe(snapshot))
        stats_manager.reset_valid_counters()
    except Exception as e:
        logger.error(f"Stats export failed: {str(e)}")
        
def _prepare_stats_dataframe(stats: Dict):
    """准备统计数据表格"""
    data = {'DeviceID': [], 
            'Currenttime':[],
            'TotalRuntime': [], 
            'ValidUpdates': []}
    
    all_devices = set(stats['total_message_counts'].keys()).union(
        stats['valid_message_counts'].keys())
    
    CurrentRuntime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')    
    for dev in sorted(all_devices):
        data['DeviceID'].append(f"GW{dev}")
        data['Currenttime'].append(CurrentRuntime)
        data['TotalRuntime'].append(str(stats['total_runtime']))
        data['ValidUpdates'].append(stats['valid_message_counts'].get(dev, 0))
        
    return pd.DataFrame(data)
    
def _save_to_csv(df: pd.DataFrame):
    """Append the stats dataframe to the session CSV file.

    The file lives in the configured log directory under the global
    ``csv_log_path`` name chosen once at startup.
    """
    filename = os.path.join(
        config.app_config['log_dir'],
        f"{csv_log_path}"
    )

    # Write the column header only when the file does not exist yet.
    header = not os.path.exists(filename)

    df.to_csv(
        filename,
        mode='a',        # append mode
        index=False,     # no index column
        header=header,   # header only on first write
        encoding='utf-8'
    )

    # BUG FIX: the log line previously said "Stats exported to (unknown)";
    # it now reports the actual destination path.
    logger.info(f"Stats exported to {filename}")


def connection_monitor():
    """Watchdog loop: once a minute, warn if the MQTT client is offline.

    Reconnection itself is handled by paho's auto-reconnect; this thread
    only provides log visibility.
    """
    while True:
        connected = mqtt_client.connection_status()
        if not connected:
            logger.warning("MQTT连接已断开，等待重连...")
        time.sleep(60)

def export_stats_periodically(interval):
    """Forever: sleep *interval* seconds, then export the current stats."""
    while True:
        time.sleep(interval)
        export_stats()
            
# ------------------------ 初始化 ------------------------
if __name__ == "__main__":
    # Load configuration first; everything else depends on it.
    config = ConfigManager()

    # Logging must exist before any component that logs.
    log_manager = LogManager(config.app_config['log_dir'])
    logger = log_manager.logger

    # Shared global components used by the worker threads.
    process_queue = queue.Queue(maxsize=100)
    stats_manager = DeviceStatsManager(config.app_config['batch_size'])
    db_manager = DatabaseManager(config.db_config)

    try:
        # Start the MQTT client (connects asynchronously and starts its
        # own background network loop via loop_start()).
        mqtt_client = MQTTClientManager(
            config.mqtt_config,
            config.app_config['device_ids']
        )

        # CSV export filename must exist before the export thread runs.
        csv_log_path = f"{time.strftime('%Y_%m_%d_%H_%M_%S')}_device_stats.csv"

        # Worker threads: DB batch writer, MQTT connection watchdog,
        # periodic stats export.
        threading.Thread(target=batch_processor, args=(db_manager,), daemon=True).start()
        threading.Thread(target=connection_monitor, daemon=True).start()
        # BUG FIX: previously the function was CALLED here
        # (target=export_stats_periodically(interval)), which blocked the
        # main thread forever instead of starting a thread.
        threading.Thread(
            target=export_stats_periodically,
            args=(config.app_config['export_interval'],),
            daemon=True
        ).start()
        logger.info("启动定期导出线程...")

        # Exit hooks: drain the queue, export one final stats snapshot,
        # and disconnect MQTT.
        atexit.register(flush_pending_updates)
        # BUG FIX: previously registered export_stats_periodically (an
        # infinite loop), which would hang interpreter shutdown.
        atexit.register(export_stats)
        atexit.register(lambda: mqtt_client.client.disconnect())

        logger.info("Service started successfully")
        # The MQTT network loop already runs in a background thread
        # (loop_start() in MQTTClientManager); starting a second loop with
        # loop_forever() would double-drive the client, so just keep the
        # main thread alive.
        while True:
            time.sleep(1)

    except KeyboardInterrupt:
        logger.info("Service stopped by user")
    except Exception as e:
        logger.critical(f"Critical error: {str(e)}")
    finally:
        db_manager.conn.close()