import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Bidirectional, Attention, Input, Concatenate
from tensorflow.keras.callbacks import EarlyStopping

# Configure Matplotlib so Chinese text in plot labels/titles renders correctly.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun', 'Arial Unicode MS']  # try several CJK-capable fonts in preference order
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly when a CJK font is active

def load_processed_data(file_path='../data/feature_engineering/room_occupancy_with_features.csv'):
    """
    Load the feature-engineered dataset from disk.

    Parameters:
    file_path: path to the processed CSV file

    Returns:
    DataFrame: the loaded data

    Raises:
    FileNotFoundError: if file_path does not exist
    """
    print(f"加载数据: {file_path}")
    # Happy path first: read and return when the file is present.
    if os.path.exists(file_path):
        df = pd.read_csv(file_path)
        print(f"成功加载数据，形状: {df.shape}")
        return df
    # Otherwise fail fast with an explicit error.
    print(f"文件不存在: {file_path}")
    raise FileNotFoundError(f"无法找到数据文件: {file_path}")

def prepare_data_for_lstm(df, sequence_length=24):
    """
    Prepare train/test data for the LSTM model, with per-group feature scaling.

    Parameters:
    df: input DataFrame; must contain a 'Room_Occupancy_Count' column
    sequence_length: sliding-window length (24 to capture longer dependencies)

    Returns:
    X_train, X_test, y_train, y_test: splits with X of shape
        (n_samples, sequence_length, n_features) and y of shape (n_samples, 1)
    feature_names: feature column names in the ORIGINAL df order
    scalers: dict of fitted scalers plus the feature index groups needed to
        reproduce the scaled-matrix column order at inference time
    sequence_length: the window length actually used
    """
    print("\n准备LSTM训练和测试数据...")
    
    # Drop raw time columns when present; they are not model inputs.
    time_cols = ['Date', 'Time', 'DateTime']
    time_cols = [col for col in time_cols if col in df.columns]
    if time_cols:
        df = df.drop(columns=time_cols)
        print(f"已删除时间列: {time_cols}")
    
    # Hand-picked "important" features — presumably from a prior
    # feature-importance analysis; origin not visible in this file (TODO confirm).
    important_features = [
        'Sound_Mean_x_S5_CO2', 'Sound_Max', 'Light_Mean_x_Sound_Mean',
        'Light_Max', 'Sound_Var', 'Light_Mean', 'Light_Var',
        'Sound_Mean', 'Temp_Mean_x_Light_Mean', 'Light_Mean_x_S5_CO2'
    ]
    
    # Keep only those that actually exist in this dataset.
    important_features = [f for f in important_features if f in df.columns]
    print(f"识别出的重要特征: {important_features}")
    
    # Separate features from the regression target.
    X = df.drop(columns=['Room_Occupancy_Count'])
    y = df['Room_Occupancy_Count'].values.reshape(-1, 1)
    
    # Feature names in original column order (NOT the scaled-matrix order below).
    feature_names = X.columns.tolist()
    print(f"特征数量: {len(feature_names)}")
    
    # Positional index groups into X's original column order.
    important_indices = [feature_names.index(f) for f in important_features if f in feature_names]
    other_indices = [i for i in range(len(feature_names)) if i not in important_indices]
    
    # Important features are standardized; the rest are min-max scaled.
    # NOTE(review): all scalers are fit on the FULL dataset before the
    # train/test split below, so test-set statistics leak into the scaling —
    # fit on the training portion only for an honest evaluation.
    important_scaler = StandardScaler()  # standardization rather than min-max scaling
    other_scaler = MinMaxScaler(feature_range=(0, 1))
    y_scaler = MinMaxScaler(feature_range=(0, 1))
    
    # Scale each group separately; an empty group becomes a zero-width array.
    if important_indices:
        X_important = important_scaler.fit_transform(X.iloc[:, important_indices])
    else:
        X_important = np.array([]).reshape(X.shape[0], 0)
    
    if other_indices:
        X_other = other_scaler.fit_transform(X.iloc[:, other_indices])
    else:
        X_other = np.array([]).reshape(X.shape[0], 0)
    
    # Scaled-matrix column order is [important..., other...] — it differs from
    # feature_names order; the saved index lists are required to reproduce this
    # ordering at inference time.
    X_scaled = np.hstack([X_important, X_other]) if X_important.size > 0 and X_other.size > 0 else (X_important if X_important.size > 0 else X_other)
    
    # Scale the target to [0, 1] as well.
    y_scaled = y_scaler.fit_transform(y)
    
    # Build sliding-window sequences: each sample is sequence_length consecutive
    # rows, labelled by the target of the row immediately after the window.
    X_sequences = []
    y_sequences = []
    
    for i in range(len(X_scaled) - sequence_length):
        X_sequences.append(X_scaled[i:i+sequence_length])
        y_sequences.append(y_scaled[i+sequence_length])
    
    X_sequences = np.array(X_sequences)
    y_sequences = np.array(y_sequences)
    
    # Pseudo-stratified split for regression: bin the target into 5 equal-width
    # bins and stratify on the bin label.
    # NOTE(review): adjacent windows overlap by sequence_length-1 rows, so a
    # random split places near-duplicate sequences in both train and test —
    # a chronological split would avoid this leakage.
    y_binned = pd.cut(y_sequences.flatten(), bins=5, labels=False)
    
    X_train, X_test, y_train, y_test = train_test_split(
        X_sequences, y_sequences, test_size=0.2, random_state=42, stratify=y_binned
    )
    
    print(f"训练集大小: {X_train.shape}, 测试集大小: {X_test.shape}")
    
    # Persist everything inference needs to reproduce the preprocessing.
    scalers = {
        'important_scaler': important_scaler,
        'other_scaler': other_scaler,
        'y_scaler': y_scaler,
        'important_indices': important_indices,
        'other_indices': other_indices
    }
    
    return X_train, X_test, y_train, y_test, feature_names, scalers, sequence_length

def create_lstm_model(input_shape, lstm_units=64, dropout_rate=0.2):
    """
    Build and compile the LSTM model.

    Parameters:
    input_shape: input shape (sequence_length, n_features)
    lstm_units: number of units in the first LSTM layer
    dropout_rate: dropout rate applied after each LSTM layer

    Returns:
    The compiled Keras model
    """
    # A bidirectional first LSTM captures temporal patterns in both directions;
    # the second, smaller LSTM layer keeps the network compact to limit
    # overfitting. A single dense unit produces the regression output.
    layers = [
        Bidirectional(LSTM(units=lstm_units, return_sequences=True), input_shape=input_shape),
        Dropout(dropout_rate),
        LSTM(units=lstm_units // 2),
        Dropout(dropout_rate),
        Dense(units=1),
    ]
    model = Sequential(layers)

    # Adam at the 1e-3 starting rate; MSE loss for regression.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        loss='mean_squared_error',
    )
    return model

def train_lstm_model(X_train, y_train, input_shape, epochs=150, batch_size=32):
    """
    Train the LSTM model with early stopping and LR scheduling.

    Parameters:
    X_train: training features
    y_train: training labels
    input_shape: model input shape (sequence_length, n_features)
    epochs: number of training epochs (150 to ensure sufficient learning)
    batch_size: batch size

    Returns:
    The trained model and its Keras training history
    """
    print("\n开始训练LSTM模型...")

    model = create_lstm_model(input_shape)

    # Early stopping uses a generous patience so training is not cut short,
    # restoring the best weights seen on the validation split; the plateau
    # scheduler shrinks the learning rate when validation loss stalls.
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True),
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss', factor=0.2, patience=10, min_lr=0.0001),
    ]

    # 20% of the training data is held out for validation.
    history = model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        callbacks=callbacks,
        verbose=1,
    )

    print("LSTM模型训练完成")
    return model, history

def evaluate_model(model, X_test, y_test, scalers):
    """
    Evaluate LSTM model performance on the test set.

    Parameters:
    model: trained model
    X_test: test features
    y_test: test labels (scaled)
    scalers: dict holding the fitted scalers; 'y_scaler' is used here

    Returns:
    dict of evaluation metrics, the predictions, and the true values
    (both mapped back to the original person-count scale)
    """
    print("\n评估LSTM模型性能...")

    # Predict, then invert the target scaling so metrics are in the
    # original person-count units.
    y_scaler = scalers['y_scaler']
    y_pred = y_scaler.inverse_transform(model.predict(X_test))
    y_true = y_scaler.inverse_transform(y_test)

    mse = mean_squared_error(y_true, y_pred)
    metrics = {
        'mse': mse,
        'rmse': np.sqrt(mse),
        'mae': mean_absolute_error(y_true, y_pred),
        'r2': r2_score(y_true, y_pred),
    }

    print(f"均方误差 (MSE): {metrics['mse']:.4f}")
    print(f"均方根误差 (RMSE): {metrics['rmse']:.4f}")
    print(f"平均绝对误差 (MAE): {metrics['mae']:.4f}")
    print(f"决定系数 (R²): {metrics['r2']:.4f}")

    return metrics, y_pred, y_true

def visualize_results(y_true, y_pred, history, output_dir='../static/model_results'):
    """
    Render and save diagnostic plots for the LSTM model.

    Parameters:
    y_true: true labels (original scale)
    y_pred: predicted labels (original scale)
    history: Keras training history
    output_dir: directory the PNG files are written into
    """
    print("\n可视化LSTM模型结果...")

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    def _finish(filename):
        # Add a grid, save the current figure into output_dir, release it.
        plt.grid(True)
        plt.savefig(os.path.join(output_dir, filename))
        plt.close()

    # 1. Scatter plot: predictions vs. actual values, with the y = x line.
    lim = max(y_true.max(), y_pred.max())
    plt.figure(figsize=(10, 6))
    plt.scatter(y_true, y_pred, alpha=0.5)
    plt.plot([0, lim], [0, lim], 'r--')
    plt.xlabel('实际值')
    plt.ylabel('预测值')
    plt.title('LSTM模型: 预测值 vs 实际值')
    _finish('LSTM模型_预测值与实际值对比.png')

    # 2. Histogram of prediction errors (true - predicted).
    plt.figure(figsize=(10, 6))
    plt.hist(y_true.flatten() - y_pred.flatten(), bins=30, alpha=0.7, color='skyblue')
    plt.xlabel('预测误差')
    plt.ylabel('频率')
    plt.title('LSTM模型: 预测误差分布')
    _finish('LSTM模型_预测误差分布.png')

    # 3. Training and validation loss curves over epochs.
    plt.figure(figsize=(10, 6))
    plt.plot(history.history['loss'], label='训练损失')
    plt.plot(history.history['val_loss'], label='验证损失')
    plt.xlabel('轮数')
    plt.ylabel('损失')
    plt.title('LSTM模型: 训练和验证损失')
    plt.legend()
    _finish('LSTM模型_训练验证损失.png')

    # 4. Predicted vs. actual sequence for the first (up to) 100 samples.
    n_samples = min(100, len(y_true))
    plt.figure(figsize=(12, 6))
    plt.plot(range(n_samples), y_true[:n_samples].flatten(), 'b-', label='实际值')
    plt.plot(range(n_samples), y_pred[:n_samples].flatten(), 'r-', label='预测值')
    plt.xlabel('样本')
    plt.ylabel('房间占用人数')
    plt.title('LSTM模型: 预测序列 vs 实际序列')
    plt.legend()
    _finish('LSTM模型_预测序列.png')

    print(f"可视化结果已保存至: {output_dir}")

def save_model_and_scalers(model, scalers, sequence_length, output_dir='../model/saved_models'):
    """
    Persist the trained LSTM model and its preprocessing artifacts.

    Parameters:
    model: trained model
    scalers: dict of fitted scalers and feature index groups
    sequence_length: sliding-window length used during training
    output_dir: directory the artifacts are written into
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # The Keras model is stored in HDF5 format.
    model_path = os.path.join(output_dir, 'lstm_model.h5')
    model.save(model_path)
    print(f"\nLSTM模型已保存至: {model_path}")

    # Bundle every scaler, both index groups and the sequence length in a
    # single joblib file so inference code can reproduce the preprocessing.
    scaler_path = os.path.join(output_dir, 'lstm_scalers.joblib')
    keys = ('important_scaler', 'other_scaler', 'y_scaler',
            'important_indices', 'other_indices')
    payload = {k: scalers[k] for k in keys}
    payload['sequence_length'] = sequence_length
    joblib.dump(payload, scaler_path)
    print(f"LSTM缩放器已保存至: {scaler_path}")

def lstm_pipeline(epochs=150, batch_size=32, sequence_length=24):
    """
    Run the complete LSTM modelling pipeline end to end.

    Parameters:
    epochs: number of training epochs (150)
    batch_size: batch size
    sequence_length: sliding-window length (24)

    Returns:
    The trained model and its evaluation metrics
    """
    print("开始LSTM模型流程...")

    # 1. Load the feature-engineered dataset.
    df = load_processed_data()

    # 2. Scale features, build sequences, split into train/test.
    (X_train, X_test, y_train, y_test,
     feature_names, scalers, sequence_length) = prepare_data_for_lstm(df, sequence_length)

    # 3. Train on (sequence_length, n_features) windows.
    model, history = train_lstm_model(
        X_train, y_train, (X_train.shape[1], X_train.shape[2]), epochs, batch_size)

    # 4. Evaluate on the held-out test set.
    metrics, y_pred, y_true = evaluate_model(model, X_test, y_test, scalers)

    # 5. Save diagnostic plots.
    visualize_results(y_true, y_pred, history)

    # 6. Persist the model and preprocessing artifacts.
    save_model_and_scalers(model, scalers, sequence_length)

    print("\nLSTM模型流程完成!")
    return model, metrics

if __name__ == "__main__":
    # Run the full LSTM pipeline with the tuned defaults.
    lstm_pipeline(epochs=150, batch_size=32, sequence_length=24)