"""
LSTM模型定义和训练模块
"""

import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import numpy as np
import os
import logging
import matplotlib.pyplot as plt
import sys
import time
import pandas as pd
from sqlalchemy import create_engine

# 添加项目根目录到路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class LSTMModel:
    """
    LSTM模型类，提供模型创建、训练、评估和预测功能
    增强版本包含：
    - 严格的数据验证
    - 梯度监控
    - 形状一致性检查
    """
    
    @staticmethod
    def _validate_data(X, y):
        """验证输入数据和标签的有效性"""
        if np.isnan(X).any() or np.isinf(X).any():
            raise ValueError("输入数据包含NaN或无限值")
        if np.isnan(y).any() or np.isinf(y).any():
            raise ValueError("标签数据包含NaN或无限值")
        if X.shape[0] != y.shape[0]:
            raise ValueError(f"样本数量不匹配: X={X.shape[0]}, y={y.shape[0]}")
    
    @staticmethod
    def load_from_csv(data_dir, sequence_length=24):
        """Load hourly pollutant CSVs and build (X, y) arrays for the LSTM.

        Every ``*.csv`` file in ``data_dir`` is read, stamped with a unified
        ``time`` index built from its ``date``/``hour`` columns, concatenated
        and sorted chronologically.  Pollutant columns are cleaned (median
        fill, 1%/99% winsorizing, robust median/IQR scaling) and sliced into
        overlapping windows.

        Args:
            data_dir (str): directory containing the CSV files; each file
                must provide ``date`` and ``hour`` columns.
            sequence_length (int): hours per input window (must be > 0).

        Returns:
            tuple: ``(X, y)`` where ``X`` has shape
            ``(samples, sequence_length, n_features)`` and ``y`` has shape
            ``(samples, n_features)`` — every pollutant is predicted.

        Raises:
            ValueError: missing directory, non-positive sequence_length, no
                CSV files, no usable features, or inconsistent shapes in the
                generated dataset.
        """
        # Validate arguments early.
        if not os.path.isdir(data_dir):
            raise ValueError(f"数据目录不存在: {data_dir}")
        if sequence_length <= 0:
            raise ValueError("sequence_length必须大于0")

        # Load and merge every CSV file in the directory.
        csv_files = [f for f in os.listdir(data_dir) if f.endswith('.csv')]
        if not csv_files:
            raise ValueError(f"在{data_dir}目录下未找到CSV文件")

        dfs = []
        for file in sorted(csv_files):
            try:
                df = pd.read_csv(os.path.join(data_dir, file))
                # Build a unified timestamp from the date and hour columns.
                df['time'] = pd.to_datetime(
                    df['date'].astype(str) + ' ' +
                    df['hour'].astype(str).str.zfill(2) + ':00'
                )
                dfs.append(df)
            except Exception as e:
                # Best-effort load: skip unreadable files, keep the rest.
                logger.error(f"加载文件{file}失败: {e}")
                continue

        if not dfs:
            raise ValueError("没有成功加载任何数据文件")

        # Merge and sort chronologically.
        df = pd.concat(dfs).sort_values('time')

        # Candidate pollutant features; keep only those present and not
        # entirely NaN.
        pollutant_features = ['PM2.5', 'PM10', 'SO2', 'NO2', 'O3']
        valid_features = []
        for feature in pollutant_features:
            if feature in df.columns:
                if df[feature].isnull().all():
                    logger.warning(f"特征{feature}全为NaN，已跳过")
                    continue
                valid_features.append(feature)

        if not valid_features:
            raise ValueError("数据中未找到有效污染物特征")

        logger.info(f"使用有效特征: {valid_features}")

        # Clean and scale each feature column.
        for feature in valid_features:
            # 1. Fill gaps with the median.  Count the missing values BEFORE
            # filling — the original counted afterwards and always logged 0.
            missing = int(df[feature].isnull().sum())
            if missing:
                median_val = df[feature].median()
                df[feature] = df[feature].fillna(median_val)
                logger.info(f"填充{feature}的{missing}个缺失值")

            # 2. Winsorize extreme outliers to the 1st/99th percentiles.
            q_low = df[feature].quantile(0.01)
            q_high = df[feature].quantile(0.99)
            df[feature] = df[feature].clip(q_low, q_high)

            # 3. Robust scaling: (x - median) / IQR, guarding tiny IQRs.
            median = df[feature].median()
            iqr = df[feature].quantile(0.75) - df[feature].quantile(0.25)
            if iqr < 1e-6:
                iqr = 1.0
                logger.warning(f"特征{feature}的IQR过小，使用默认缩放")
            df[feature] = (df[feature] - median) / (iqr + 1e-8)

        # Slice into overlapping windows; the label is the full feature
        # vector of the hour that follows each window.
        data = df[valid_features].values
        X, y = [], []
        for i in range(len(data) - sequence_length):
            X.append(data[i:i+sequence_length])
            y.append(data[i+sequence_length])

        X, y = np.array(X), np.array(y)

        # Sanity-check the generated shapes.
        if X.shape[-1] != len(valid_features):
            raise ValueError(
                f"特征数量不匹配: 数据有{X.shape[-1]}个特征, "
                f"但需要{len(valid_features)}个特征"
            )
        if X.shape[0] != y.shape[0]:
            raise ValueError(f"样本数量不匹配: X={X.shape[0]}, y={y.shape[0]}")
        if X.shape[1] != sequence_length:
            raise ValueError(f"序列长度不匹配: 预期{sequence_length}, 实际{X.shape[1]}")

        logger.info(f"成功加载数据: X.shape={X.shape}, y.shape={y.shape}")
        return X, y

    @staticmethod
    def load_from_sql(connection_string, query, sequence_length=24, target_column='PM2.5'):
        """Fetch pollutant rows from a SQL database and window them for the LSTM.

        Args:
            connection_string (str): SQLAlchemy database URL.
            query (str): SQL query producing the pollutant rows.
            sequence_length (int): hours per input window.
            target_column (str): kept for interface compatibility (all
                available pollutant columns are used as targets).

        Returns:
            tuple: ``(X, y)`` numpy arrays of input windows and the feature
            vector of the following hour.

        Raises:
            ValueError: when none of the known pollutant columns appear in
                the query result.
        """
        frame = pd.read_sql(query, create_engine(connection_string))

        # Preserve chronological order when a time column is present.
        if 'time' in frame.columns:
            frame = frame.sort_values('time')

        # Known pollutant columns; keep only those the query returned.
        features = ['AQI','PM2.5','PM2.5_24h','O3','SO2_24h','SO2','PM10','PM10_24h','O3_24h','CO_24h','CO']
        available_features = [col for col in features if col in frame.columns]
        if not available_features:
            raise ValueError("未找到任何污染物数据列")

        values = frame[available_features].values

        # Overlapping windows; each label is the row after its window.
        window_starts = range(len(values) - sequence_length)
        windows = [values[start:start + sequence_length] for start in window_starts]
        targets = [values[start + sequence_length] for start in window_starts]
        return np.array(windows), np.array(targets)
    
    def __init__(self, input_shape, model_path=None):
        """Create (or load) an LSTM model for multi-pollutant forecasting.

        Args:
            input_shape (tuple): ``(timesteps, n_features)``; the output
                width is set to ``n_features`` so the model predicts every
                input feature.
            model_path (str, optional): path of a pre-trained model; a new
                model is built when loading fails or shapes mismatch.

        Raises:
            ValueError: when input_shape is not a 2-tuple of positive ints.
        """
        if len(input_shape) != 2:
            raise ValueError("input_shape必须是(时间步长, 特征数)的元组")
        if input_shape[0] <= 0 or input_shape[1] <= 0:
            raise ValueError("input_shape的维度必须大于0")

        # Normalize to a tuple: Keras' model.input_shape[1:] is always a
        # tuple, so a list here would make the shape comparison below fail
        # and needlessly rebuild a perfectly good loaded model.
        self.input_shape = tuple(input_shape)
        self.output_shape = self.input_shape[1]  # output width = n_features
        self.model = None
        self.history = None

        # Load a pre-trained model when available, otherwise build fresh.
        if model_path and os.path.exists(model_path):
            try:
                self.model = load_model(model_path)
                logger.info(f"成功加载预训练模型: {model_path}")
                # Rebuild when the loaded model's shapes do not match ours.
                if (self.model.input_shape[1:] != self.input_shape or
                    self.model.output_shape[-1] != self.output_shape):
                    logger.warning("模型形状不匹配，将重建模型")
                    self._build_model()
            except Exception as e:
                logger.error(f"加载模型失败: {e}")
                self._build_model()
        else:
            logger.info("构建新模型")
            self._build_model()
    
    def _build_model(self):
        """Build and compile the stacked-LSTM network.

        Three LSTM layers (50 units each) interleaved with batch-norm and
        dropout, followed by a linear Dense head whose width equals the
        number of input features.

        Note: the previous version prepended an un-adapted
        ``tf.keras.layers.Normalization`` layer.  Keras requires ``adapt()``
        (or explicit mean/variance) before such a layer can be called, so
        training failed; the layer is removed — inputs are already
        robust-scaled by ``load_from_csv``.
        """
        model = Sequential()

        # First LSTM layer; defines the input shape for the whole network.
        model.add(LSTM(
            units=50,
            return_sequences=True,
            input_shape=self.input_shape,
            recurrent_dropout=0.1,
            kernel_initializer='glorot_uniform',
            kernel_constraint=tf.keras.constraints.MaxNorm(3)
        ))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))

        # Second LSTM layer.
        model.add(LSTM(
            units=50,
            return_sequences=True,
            recurrent_dropout=0.1,
            kernel_initializer='glorot_uniform',
            kernel_constraint=tf.keras.constraints.MaxNorm(3)
        ))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))

        # Third LSTM layer collapses the sequence to a single vector.
        model.add(LSTM(
            units=50,
            return_sequences=False,
            recurrent_dropout=0.1,
            kernel_initializer='glorot_uniform',
            kernel_constraint=tf.keras.constraints.MaxNorm(3)
        ))
        model.add(BatchNormalization())
        model.add(Dropout(0.2))

        # Linear output head: one value per pollutant feature.
        model.add(Dense(units=self.output_shape, activation='linear'))

        # Adam with gradient value clipping for training stability.
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=0.0005,
            clipvalue=0.5
        )
        model.compile(
            optimizer=optimizer,
            # Huber loss is robust to outliers; use the loss class rather
            # than the 'huber_loss' string alias, which newer Keras
            # versions do not resolve.
            loss=tf.keras.losses.Huber(),
            metrics=['mae']
        )

        self.model = model
        logger.info("LSTM模型构建完成")
        
    def summary(self):
        """Print the model architecture summary.

        Returns:
            The value of ``model.summary()``, or None when no model has
            been built yet (the original raised AttributeError here,
            unlike every sibling method which guards against it).
        """
        if self.model is None:
            logger.error("模型未初始化，无法显示摘要")
            return None
        return self.model.summary()
    
    def train(self, X_train, y_train, X_val=None, y_val=None, epochs=100, batch_size=32,
              patience=10, save_path=None, verbose=1):
        """Train the model with early stopping, LR decay and checkpointing.

        Args:
            X_train (numpy.ndarray): training input windows.
            y_train (numpy.ndarray): training targets.
            X_val (numpy.ndarray, optional): validation input windows.
            y_val (numpy.ndarray, optional): validation targets.
            epochs (int): maximum number of epochs.
            batch_size (int): batch size.
            patience (int): early-stopping patience (LR decay uses half).
            save_path (str, optional): where to checkpoint the best model.
            verbose (int): Keras fit verbosity.

        Returns:
            tensorflow.keras.callbacks.History: the training history.
        """
        # Validation is only used when BOTH arrays are supplied; the monitor
        # must follow the same rule, otherwise the callbacks would watch a
        # 'val_loss' metric that never exists.
        has_validation = X_val is not None and y_val is not None
        monitor = 'val_loss' if has_validation else 'loss'

        callbacks = [
            # Stop when the monitored loss plateaus; restore best weights.
            EarlyStopping(
                monitor=monitor,
                patience=patience,
                restore_best_weights=True,
                verbose=1
            ),
            # Halve the learning rate on shorter plateaus.
            ReduceLROnPlateau(
                monitor=monitor,
                factor=0.5,
                patience=patience // 2,
                min_lr=1e-6,
                verbose=1
            ),
        ]

        if save_path:
            # Only create the parent directory when the path has one:
            # os.makedirs('') raises for bare filenames (original bug).
            parent = os.path.dirname(save_path)
            if parent:
                os.makedirs(parent, exist_ok=True)
            callbacks.append(ModelCheckpoint(
                filepath=save_path,
                monitor=monitor,
                save_best_only=True,
                verbose=1
            ))

        start_time = time.time()

        # Single fit call; validation_data=None is equivalent to omitting it,
        # which removes the duplicated fit branches of the original.
        self.history = self.model.fit(
            X_train, y_train,
            validation_data=(X_val, y_val) if has_validation else None,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks,
            verbose=verbose
        )

        training_time = time.time() - start_time
        logger.info(f"模型训练完成，共耗时 {training_time:.2f} 秒")

        return self.history
    
    def evaluate(self, X_test, y_test, batch_size=32):
        """Evaluate the trained model on a held-out test set.

        Args:
            X_test (numpy.ndarray): test input windows.
            y_test (numpy.ndarray): test targets.
            batch_size (int): evaluation batch size.

        Returns:
            The loss/metric values from Keras, or None when no model has
            been built.
        """
        if self.model is None:
            logger.error("模型未初始化，无法评估")
            return None

        results = self.model.evaluate(X_test, y_test, batch_size=batch_size, verbose=1)

        # Log every metric next to its name.
        for name, value in zip(self.model.metrics_names, results):
            logger.info(f"{name}: {value:.4f}")

        return results
    
    def predict(self, X, batch_size=32):
        """Run inference on a batch of input windows.

        Args:
            X (numpy.ndarray): input windows to predict on.
            batch_size (int): prediction batch size.

        Returns:
            numpy.ndarray: model predictions, or None when no model has
            been built.
        """
        if self.model is None:
            logger.error("模型未初始化，无法预测")
            return None
        return self.model.predict(X, batch_size=batch_size, verbose=0)
    
    def save(self, filepath):
        """Persist the model to disk (best-effort; failures are logged).

        Args:
            filepath (str): destination path; parent directories are
                created when needed.
        """
        if self.model is None:
            logger.error("模型未初始化，无法保存")
            return

        try:
            # os.makedirs('') raises for bare filenames, and because the
            # original call sat inside this try the save was silently
            # skipped; only create a directory when the path has one.
            parent = os.path.dirname(filepath)
            if parent:
                os.makedirs(parent, exist_ok=True)

            self.model.save(filepath)
            logger.info(f"模型已保存到 {filepath}")
        except Exception as e:
            logger.error(f"保存模型失败: {e}")
    
    def plot_history(self, save_path=None):
        """Plot training/validation loss and MAE curves side by side.

        Args:
            save_path (str, optional): when given, the figure is also
                written to this path (parent directory created if needed).
        """
        if self.history is None:
            logger.warning("没有训练历史记录可绘制")
            return

        hist = self.history.history
        plt.figure(figsize=(12, 5))

        # Left panel: loss curves.
        plt.subplot(1, 2, 1)
        plt.plot(hist['loss'], label='训练损失')
        if 'val_loss' in hist:
            plt.plot(hist['val_loss'], label='验证损失')
        plt.title('模型损失')
        plt.xlabel('轮次')
        plt.ylabel('损失')
        plt.legend()
        plt.grid(True)

        # Right panel: mean-absolute-error curves.
        plt.subplot(1, 2, 2)
        plt.plot(hist['mae'], label='训练MAE')
        if 'val_mae' in hist:
            plt.plot(hist['val_mae'], label='验证MAE')
        plt.title('平均绝对误差')
        plt.xlabel('轮次')
        plt.ylabel('MAE')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()

        if save_path:
            # os.makedirs('') raises for bare filenames (original bug);
            # only create a directory when the path actually has one.
            parent = os.path.dirname(save_path)
            if parent:
                os.makedirs(parent, exist_ok=True)
            plt.savefig(save_path)
            logger.info(f"训练历史图表已保存到 {save_path}")

        plt.show()
    
    def plot_predictions(self, X_test, y_test, scaler=None, save_path=None, n_samples=100):
        """Plot predicted vs. actual curves, one subplot per output feature.

        Args:
            X_test (numpy.ndarray): test input windows.
            y_test (numpy.ndarray): ground-truth targets.
            scaler (object, optional): transformer with ``inverse_transform``
                to map values back to the original scale.
            save_path (str, optional): where to save the figure.
            n_samples (int): number of leading samples to display.

        Returns:
            tuple: ``(predictions, actual)`` as plotted, or None when no
            model has been built.
        """
        if self.model is None:
            logger.warning("模型未初始化，无法预测")
            return

        predictions = self.predict(X_test[:n_samples])
        actual = y_test[:n_samples]

        # Best-effort inverse scaling; plot scaled values on failure.
        if scaler is not None:
            try:
                predictions = scaler.inverse_transform(predictions)
                actual = scaler.inverse_transform(actual)
            except Exception as e:
                logger.error(f"使用scaler进行逆转换失败: {e}")

        plt.figure(figsize=(15, 10))
        pollutants = ['AQI','PM2.5','PM2.5_24h','O3','SO2_24h','SO2','PM10','PM10_24h','O3_24h','CO_24h','CO']

        for i in range(self.output_shape):
            plt.subplot(self.output_shape, 1, i+1)
            plt.plot(actual[:, i], label='实际值', marker='o', linestyle='-', markersize=3)
            plt.plot(predictions[:, i], label='预测值', marker='x', linestyle='-', markersize=3)
            # Fall back to a generic label: the original indexed the
            # hard-coded list directly and raised IndexError whenever
            # output_shape exceeded its length.
            name = pollutants[i] if i < len(pollutants) else f'特征{i+1}'
            plt.title(f'{name}预测对比')
            plt.xlabel('样本索引')
            plt.ylabel('浓度')
            plt.legend()
            plt.grid(True)

        plt.tight_layout()

        if save_path:
            # os.makedirs('') raises for bare filenames (original bug).
            parent = os.path.dirname(save_path)
            if parent:
                os.makedirs(parent, exist_ok=True)
            plt.savefig(save_path)
            logger.info(f"预测结果对比图已保存到 {save_path}")

        plt.show()

        return predictions, actual
    
    def hyperparameter_tuning(self, X_train, y_train, X_val, y_val, units_list=None,
                              dropout_list=None, learning_rates=None, batch_sizes=None,
                              epochs=50, patience=5):
        """Grid-search over LSTM units, dropout, learning rate and batch size.

        Args:
            X_train, y_train: training arrays.
            X_val, y_val: validation arrays (used for model selection).
            units_list (list, optional): LSTM unit counts; default [50, 100].
            dropout_list (list, optional): dropout rates; default [0.1, 0.2].
            learning_rates (list, optional): default [0.001, 0.0001].
            batch_sizes (list, optional): default [32, 64].
            epochs (int): max epochs per combination.
            patience (int): early-stopping patience.

        Returns:
            tuple: (best_params, best_val_loss, results) where results is a
            list of one dict per tried combination.
        """
        # Defaults are created per call to avoid mutable default arguments;
        # values mirror the original signature.
        if units_list is None:
            units_list = [50, 100]
        if dropout_list is None:
            dropout_list = [0.1, 0.2]
        if learning_rates is None:
            learning_rates = [0.001, 0.0001]
        if batch_sizes is None:
            batch_sizes = [32, 64]

        best_val_loss = float('inf')
        best_params = {}
        results = []

        for units in units_list:
            for dropout in dropout_list:
                for lr in learning_rates:
                    for batch_size in batch_sizes:
                        logger.info(f"尝试参数: units={units}, dropout={dropout}, lr={lr}, batch_size={batch_size}")

                        # Rebuild a fresh model for this combination.
                        self._build_model_with_params(units=units, dropout=dropout, lr=lr)

                        history = self.model.fit(
                            X_train, y_train,
                            validation_data=(X_val, y_val),
                            epochs=epochs,
                            batch_size=batch_size,
                            callbacks=[
                                EarlyStopping(
                                    monitor='val_loss',
                                    patience=patience,
                                    restore_best_weights=True
                                )
                            ],
                            verbose=0
                        )

                        # Best validation loss seen during this run.
                        val_loss = min(history.history['val_loss'])

                        results.append({
                            'units': units,
                            'dropout': dropout,
                            'learning_rate': lr,
                            'batch_size': batch_size,
                            'val_loss': val_loss
                        })

                        logger.info(f"验证损失: {val_loss:.4f}")

                        if val_loss < best_val_loss:
                            best_val_loss = val_loss
                            best_params = {
                                'units': units,
                                'dropout': dropout,
                                'learning_rate': lr,
                                'batch_size': batch_size
                            }

        # Rebuild with the winning combination.  The original passed
        # **best_params, which raised TypeError: _build_model_with_params
        # accepts 'lr', not 'learning_rate', and no 'batch_size' at all.
        logger.info(f"最佳参数: {best_params}, 验证损失: {best_val_loss:.4f}")
        if best_params:
            self._build_model_with_params(
                units=best_params['units'],
                dropout=best_params['dropout'],
                lr=best_params['learning_rate']
            )

        return best_params, best_val_loss, results
    
    def _build_model_with_params(self, units=50, dropout=0.2, lr=0.001):
        """Build and compile a three-layer LSTM with the given hyperparameters.

        Args:
            units (int): LSTM units per layer.
            dropout (float): dropout rate (recurrent dropout uses half).
            lr (float): Adam learning rate.

        Returns:
            The compiled Keras model (also stored on ``self.model``).
        """
        recurrent_rate = dropout / 2

        # Assemble the stack as a list: three LSTM blocks (the last one
        # collapses the sequence) followed by a linear output head.
        model = Sequential([
            LSTM(units=units, return_sequences=True, input_shape=self.input_shape,
                 recurrent_dropout=recurrent_rate),
            BatchNormalization(),
            Dropout(dropout),
            LSTM(units=units, return_sequences=True,
                 recurrent_dropout=recurrent_rate),
            BatchNormalization(),
            Dropout(dropout),
            LSTM(units=units, return_sequences=False,
                 recurrent_dropout=recurrent_rate),
            BatchNormalization(),
            Dropout(dropout),
            Dense(units=self.output_shape),
        ])

        model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
            loss='mean_squared_error',
            metrics=['mae']
        )

        self.model = model
        return model

if __name__ == "__main__":
    # Seed RNGs so runs are reproducible.
    np.random.seed(42)
    tf.random.set_seed(42)

    # Data directory and output directory for model/plots.
    data_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data", "result")
    model_save_dir = os.path.join(os.path.dirname(__file__), "plots")
    os.makedirs(model_save_dir, exist_ok=True)

    # Load and window the data: 24 hours in, next hour out.
    logger.info("正在加载数据...")
    sequence_length = 24
    X, y = LSTMModel.load_from_csv(data_dir, sequence_length=sequence_length)

    # Chronological 80/20 split — no shuffling for time series.
    train_size = int(len(X) * 0.8)
    X_train, X_val = X[:train_size], X[train_size:]
    y_train, y_val = y[:train_size], y[train_size:]

    # Build and train the model.
    model = LSTMModel(input_shape=(X_train.shape[1], X_train.shape[2]))
    history = model.train(X_train, y_train, X_val, y_val, epochs=100, batch_size=32)

    # Save the TRAINED model and its history exactly once.  The original
    # script then rebuilt the model with fresh random weights and saved
    # again to the same path, overwriting the trained weights with an
    # untrained network — that step is removed.
    model.save(os.path.join(model_save_dir, "lstm_model.h5"))
    model.plot_history(os.path.join(model_save_dir, "training_history.png"))