import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv1D, LSTM, Dense, Dropout, Multiply, GlobalAveragePooling1D, Reshape
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Enable GPU acceleration: turn on memory growth so TensorFlow does not
# reserve all GPU memory up front.
physical_gpus = tf.config.experimental.list_physical_devices('GPU')
if physical_gpus:
    try:
        for device in physical_gpus:
            tf.config.experimental.set_memory_growth(device, True)
        print("GPU 加速已启用")
    except RuntimeError as err:
        # Memory growth must be configured before GPUs are initialized.
        print(err)


def get_desktop_path():
    """Return the current user's Desktop directory path.

    On Windows the real Desktop location is read from the registry (it may
    be redirected, e.g. by folder redirection); elsewhere fall back to
    ``~/Desktop``.

    Returns:
        str: Absolute path of the Desktop directory.
    """
    if os.name == 'nt':
        import winreg
        # Context manager ensures the registry key handle is closed
        # (the original code leaked the handle).
        with winreg.OpenKey(
            winreg.HKEY_CURRENT_USER,
            r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders',
        ) as key:
            return winreg.QueryValueEx(key, "Desktop")[0]
    return os.path.join(os.path.expanduser("~"), "Desktop")


def ensure_file_path(file_path):
    """Create the parent directory of ``file_path`` if it does not exist.

    Fixes two defects in the original: a bare filename (empty dirname)
    made ``os.makedirs('')`` raise FileNotFoundError, and the separate
    exists-check/makedirs pair was race-prone. ``exist_ok=True`` makes the
    call idempotent and race-free.

    Args:
        file_path: Path to a file whose parent directory should exist.
    """
    directory = os.path.dirname(file_path)
    if directory:  # empty when file_path is a bare filename -> nothing to create
        os.makedirs(directory, exist_ok=True)


def save_model(model, file_path):
    """Persist a trained Keras model to ``file_path``.

    The parent directory is created first. Saving is best-effort: any
    failure is reported to stdout instead of being raised.

    Args:
        model: A Keras model exposing ``save(path)``.
        file_path: Destination path for the saved model.
    """
    ensure_file_path(file_path)
    try:
        model.save(file_path)
        print(f"模型已保存为 {file_path}")
    except Exception as err:
        print(f"保存模型时出错: {err}")


# Data loading / preprocessing.
def load_and_preprocess_data(filepath, time_steps=10):
    """Load battery telemetry from an Excel file and prepare sequence data.

    Reads current/voltage/SOC columns, splits the rows chronologically into
    train/validation/test partitions, windows each partition into
    ``time_steps``-long sequences, and min-max scales features and target
    (scalers fitted on the training split only).

    Args:
        filepath: Path to the Excel file; must contain the columns
            '电流(A)', '电压(V)' and 'SOC(%)'.
        time_steps: Sequence window length (default 10, matching the
            original hard-coded value).

    Returns:
        tuple: (X_train_seq, X_val_seq, X_test_seq,
                y_train_seq, y_val_seq, y_test_seq,
                target_scaler, feature_scaler),
        or a tuple of eight ``None`` values when the file does not exist.

    Raises:
        KeyError: If a required column is missing from the file.
    """
    try:
        data = pd.read_excel(filepath)
        print(data.head())  # quick visual sanity check of the loaded rows
    except FileNotFoundError:
        print(f"错误: 文件 {filepath} 未找到。")
        return None, None, None, None, None, None, None, None

    # Fail fast if any required column is absent.
    required_columns = ['电流(A)', '电压(V)', 'SOC(%)']
    for col in required_columns:
        if col not in data.columns:
            raise KeyError(f"数据文件中缺少必要的列: {col}")

    # Features: current and voltage; target: SOC.
    X = pd.DataFrame({'电流(A)': data['电流(A)'].values,
                      '电压(V)': data['电压(V)'].values})
    y = data['SOC(%)'].values

    # BUG FIX: the original splits shuffled the rows (sklearn's default),
    # which destroys the temporal ordering that the sliding-window sequence
    # construction below depends on -- each "sequence" was assembled from
    # unrelated random rows. Split chronologically instead:
    # 50% train, then 60%/40% of the remainder as validation/test.
    X_train, X_temp, y_train, y_temp = train_test_split(
        X, y, test_size=0.5, shuffle=False)
    X_val, X_test, y_val, y_test = train_test_split(
        X_temp, y_temp, test_size=0.4, shuffle=False)

    def create_sequences(X_part, y_part):
        # Sliding windows: each sample is `time_steps` consecutive rows and
        # its target is the SOC value immediately after the window.
        X_seq, y_seq = [], []
        for i in range(len(X_part) - time_steps):
            X_seq.append(X_part.iloc[i:i + time_steps].values)
            y_seq.append(y_part[i + time_steps])
        return np.array(X_seq), np.array(y_seq)

    # Window each partition independently (no leakage across splits).
    X_train_seq, y_train_seq = create_sequences(X_train, y_train)
    X_val_seq, y_val_seq = create_sequences(X_val, y_val)
    X_test_seq, y_test_seq = create_sequences(X_test, y_test)

    feature_scaler = MinMaxScaler()  # fitted on training features only
    target_scaler = MinMaxScaler()   # fitted on training targets only

    # Scale features: flatten (samples, time_steps, n_features) to 2-D,
    # scale, then restore each array's 3-D shape.
    n_features = X.shape[1]
    original_shape = X_train_seq.shape
    X_train_seq = feature_scaler.fit_transform(
        X_train_seq.reshape(-1, n_features)).reshape(original_shape)
    X_val_seq = feature_scaler.transform(
        X_val_seq.reshape(-1, n_features)).reshape(X_val_seq.shape)
    X_test_seq = feature_scaler.transform(
        X_test_seq.reshape(-1, n_features)).reshape(X_test_seq.shape)

    # Scale SOC targets to [0, 1] as column vectors.
    y_train_seq = target_scaler.fit_transform(y_train_seq.reshape(-1, 1))
    y_val_seq = target_scaler.transform(y_val_seq.reshape(-1, 1))
    y_test_seq = target_scaler.transform(y_test_seq.reshape(-1, 1))

    print("\n数据预处理完成!")
    print(f"训练集形状: {X_train_seq.shape}")
    print(f"验证集形状: {X_val_seq.shape}")
    print(f"测试集形状: {X_test_seq.shape}")

    return X_train_seq, X_val_seq, X_test_seq, y_train_seq, y_val_seq, y_test_seq, target_scaler, feature_scaler


# Attention mechanism block.
def attention_block(inputs, time_steps):
    """Weight ``inputs`` along the time axis with learned softmax scores.

    Assumes ``inputs`` is the (batch, time_steps, features) output of an
    LSTM with return_sequences=True -- TODO confirm; the rank-4 handling
    below suggests other shapes have been observed in practice.
    """
    # Dense over the last axis: for 3-D input this yields
    # (batch, time_steps, time_steps) softmax-normalized scores.
    attention = Dense(time_steps, activation='softmax')(inputs)
    # Trailing axis so the scores can broadcast against the feature axis.
    # NOTE(review): for 3-D inputs this makes the scores rank 4
    # (batch, time_steps, time_steps, 1), so the Multiply below broadcasts
    # to a rank-4 result -- verify this matches the intended attention
    # formulation before relying on it.
    attention = tf.expand_dims(attention, axis=-1)
    # Element-wise product of inputs and broadcast attention scores.
    weighted = Multiply()([inputs, attention])
    # Collapse a rank-4 result back to rank 3 so downstream layers
    # (e.g. GlobalAveragePooling1D) receive (batch, time, features).
    if len(weighted.shape) == 4:
        if weighted.shape[2] == 1:
            weighted = tf.squeeze(weighted, axis=2)
        elif weighted.shape[3] == 1:
            weighted = tf.squeeze(weighted, axis=3)
        else:
            # Neither candidate axis is size 1: reduce over the last axis
            # instead, then drop it.
            weighted = tf.reduce_sum(weighted, axis=3, keepdims=True)
            weighted = tf.squeeze(weighted, axis=3)
    return weighted


def build_model(input_shape, learning_rate=0.001, lstm_units=64, filters=32):
    """Build and compile the CNN-LSTM-attention SOC regression model.

    Args:
        input_shape: (time_steps, n_features) shape of one input sample.
        learning_rate: Adam learning rate.
        lstm_units: Number of LSTM units.
        filters: Filter count for the first Conv1D (second uses half).

    Returns:
        A compiled ``tf.keras`` Model with a single linear output and
        MSE loss.
    """
    model_input = Input(shape=input_shape)

    # Convolutional feature extractor, with dropout against overfitting.
    x = Conv1D(filters=filters, kernel_size=3, activation='relu', padding='same')(model_input)
    x = Dropout(0.2)(x)
    x = Conv1D(filters=filters // 2, kernel_size=3, activation='relu', padding='same')(x)
    x = Dropout(0.2)(x)

    # Recurrent layer keeps the full sequence for the attention step.
    x = LSTM(units=lstm_units, return_sequences=True)(x)
    x = Dropout(0.2)(x)

    # Per-timestep attention weighting (input_shape[0] == time steps).
    x = attention_block(x, input_shape[0])

    # Pool over time and regress a single SOC value.
    pooled = GlobalAveragePooling1D()(x)
    model_output = Dense(1)(pooled)

    net = Model(inputs=model_input, outputs=model_output)
    net.compile(optimizer=Adam(learning_rate=learning_rate), loss='mse')
    return net


def plot_training_history(history):
    """Plot the training and validation loss curves.

    The figure is saved as 'training_history.png' on the user's desktop
    and also shown interactively.

    Args:
        history: Keras ``History`` object returned by ``model.fit``.
    """
    plt.figure(figsize=(10, 6))
    # One curve per tracked loss series.
    for key, label in (('loss', 'Training Loss'), ('val_loss', 'Validation Loss')):
        plt.plot(history.history[key], label=label)
    plt.title('Model Training History')
    plt.xlabel('Epoch')
    plt.ylabel('Loss (MSE)')
    plt.legend()
    plt.grid(True)
    out_file = os.path.join(get_desktop_path(), 'training_history.png')
    ensure_file_path(out_file)
    plt.savefig(out_file)  # persist the figure to the desktop
    plt.show()


def plot_predictions(y_true, y_pred, title, scaler=None):
    """Plot predicted vs. true SOC values and save the figure to the desktop.

    Args:
        y_true: Ground-truth values (any shape reshapeable to a column).
        y_pred: Model predictions, same length as ``y_true``.
        title: Figure title; also used (spaces -> underscores) as the
            saved PNG's filename.
        scaler: Optional fitted scaler; when given, both series are
            inverse-transformed back to the original SOC(%) scale.
    """
    if scaler:
        y_true = scaler.inverse_transform(y_true.reshape(-1, 1)).flatten()
        y_pred = scaler.inverse_transform(y_pred.reshape(-1, 1)).flatten()

    plt.figure(figsize=(12, 6))
    plt.plot(y_true, label='True Values', color='blue', alpha=0.6)
    plt.plot(y_pred, label='Predictions', color='red', alpha=0.6)
    plt.title(title)
    plt.xlabel('Sample Index')
    plt.ylabel('SOC(%)')
    plt.legend()
    plt.grid(True)
    target = os.path.join(get_desktop_path(), f'{title.replace(" ", "_")}.png')
    ensure_file_path(target)
    plt.savefig(target)  # persist the figure to the desktop
    plt.show()


# Main program.
def main():
    """Train the CNN-LSTM-attention SOC model end to end.

    Loads 'shuju.xlsx' from the desktop, trains for a fixed number of
    epochs, plots the loss history and one validation-slice prediction,
    and saves the trained model to the desktop.
    """
    EPOCHS = 2  # training epochs (NOTE(review): looks like a debug value)
    BATCH_SIZE = 32  # samples per gradient update

    desktop_path = get_desktop_path()
    filepath = os.path.join(desktop_path, 'shuju.xlsx')
    print()

    # Load and preprocess the data; bail out if the file was missing
    # (load_and_preprocess_data returns all-None in that case).
    X_train, X_val, X_test, y_train, y_val, y_test, target_scaler, feature_scaler = load_and_preprocess_data(filepath)
    if X_train is None:
        return

    input_shape = X_train.shape[1:]  # (time_steps, n_features)
    print(f"\n输入形状: {input_shape}")
    model = build_model(input_shape)  # build the model
    model.summary()  # print the model summary

    print("\n开始训练模型...")
    history = model.fit(X_train, y_train,
                        validation_data=(X_val, y_val),
                        epochs=EPOCHS,
                        batch_size=BATCH_SIZE,
                        verbose=1)  # train the model

    print("\n评估模型性能...")

    # Try to isolate one charge/discharge cycle by finding the first sign
    # change of the current signal in the validation set.
    # NOTE(review): X_val was MinMax-scaled to [0, 1] during preprocessing,
    # so this current feature is non-negative and np.sign rarely changes --
    # the detection likely always falls through to the full-validation-set
    # branch. To detect real sign changes, inverse-transform the current
    # first. Confirm intent before relying on this slice.
    current_data = X_val[:, -1, 0]  # last-timestep current feature of each window
    sign_changes = np.where(np.diff(np.sign(current_data)))[0]
    if len(sign_changes) > 0:
        start_index = 0
        end_index = sign_changes[0] + 1
        X_val_single_cycle = X_val[start_index:end_index]
        y_val_single_cycle = y_val[start_index:end_index]
    else:
        print("未找到充放电数据的切换点，使用全量验证集数据。")
        X_val_single_cycle = X_val
        y_val_single_cycle = y_val

    val_pred = model.predict(X_val_single_cycle)  # predict on the selected slice

    print("\n生成可视化结果...")
    plot_training_history(history)  # loss curves
    plot_predictions(y_val_single_cycle, val_pred, "Validation Set Single Cycle Predictions", target_scaler)  # single-cycle validation predictions

    # Persist the trained model (HDF5 format) to the desktop.
    model_path = os.path.join(desktop_path, 'battery_soc_model.h5')
    save_model(model, model_path)
    print("所有可视化图表已保存为 PNG 文件到桌面")


# Script entry point.
if __name__ == "__main__":
    main()