import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, RepeatVector, TimeDistributed, Attention, Concatenate
import warnings

# Silence noisy library warnings in the console output.
warnings.filterwarnings('ignore')

# Configure matplotlib for CJK-capable fonts and correct minus-sign rendering.
plt.rcParams.update({
    'font.sans-serif': ['SimHei', 'Microsoft YaHei', 'DejaVu Sans'],
    'axes.unicode_minus': False,
})

# Fix RNG seeds so model initialisation and results are reproducible.
np.random.seed(42)
tf.random.set_seed(42)

# 1. Load the raw data; a 'precipitation' column is expected downstream.
precip_df = pd.read_excel('降水量.xlsx', engine='openpyxl')


def create_seq2seq_sequences(data, input_length=30, output_length=1):
    """Slice a time series into aligned encoder/decoder windows.

    Args:
        data: indexable sequence (e.g. array of shape (T, features)).
        input_length: number of past steps fed to the encoder.
        output_length: number of future steps the decoder must predict.

    Returns:
        Tuple (X, y) of numpy arrays: X holds the input windows and y the
        immediately-following target windows. Empty arrays when the series
        is too short to yield a single window.
    """
    n_windows = len(data) - input_length - output_length + 1
    inputs = [data[start:start + input_length] for start in range(n_windows)]
    targets = [
        data[start + input_length:start + input_length + output_length]
        for start in range(n_windows)
    ]
    return np.array(inputs), np.array(targets)


# Scale precipitation into [0, 1]; the scaler is reused later to invert predictions.
scaler = MinMaxScaler()
precipitation_scaled = scaler.fit_transform(precip_df[['precipitation']])

sequence_length = 30  # encoder window length (days of history per sample)
features = 1          # univariate series

# Build (30-step input, 1-step target) supervised pairs.
X_seq2seq, y_seq2seq = create_seq2seq_sequences(
    precipitation_scaled, input_length=sequence_length, output_length=1
)

# Chronological 70/30 train/test split (no shuffling for time series).
n_train = int(len(X_seq2seq) * 0.7)
X_seq2seq_train, X_seq2seq_test = X_seq2seq[:n_train], X_seq2seq[n_train:]
y_seq2seq_train, y_seq2seq_test = y_seq2seq[:n_train], y_seq2seq[n_train:]


# 构建简单的Seq2Seq模型（无注意力）
def build_simple_seq2seq(sequence_length, features, units=50, output_length=1):
    """Build a plain encoder-decoder (Seq2Seq) LSTM model without attention.

    Args:
        sequence_length: number of time steps in each input window.
        features: number of features per time step.
        units: hidden size of both the encoder and decoder LSTMs.
        output_length: number of future steps to predict. Defaults to 1,
            matching the previously hard-coded ``RepeatVector(1)``, so
            existing callers are unaffected.

    Returns:
        A compiled tf.keras Sequential model mapping
        (sequence_length, features) -> (output_length, 1), trained with
        Adam/MSE and tracking MAE.
    """
    model = tf.keras.models.Sequential()
    # Encoder: compress the input window into a single hidden vector.
    model.add(LSTM(units, activation='relu', input_shape=(sequence_length, features)))
    # Repeat the encoded vector once per decoder time step.
    model.add(RepeatVector(output_length))
    # Decoder: unroll the repeated context into an output sequence.
    model.add(LSTM(units, activation='relu', return_sequences=True))
    # Per-step linear projection to a single predicted value.
    model.add(TimeDistributed(Dense(1)))

    model.compile(optimizer=Adam(learning_rate=0.001), loss='mse', metrics=['mae'])
    return model


# 构建带注意力机制的Seq2Seq模型
def build_attention_seq2seq(sequence_length, features, units=50):
    """Build an encoder-decoder LSTM with dot-product (Luong-style) attention.

    Args:
        sequence_length: number of time steps in each encoder input window.
        features: number of features per time step.
        units: hidden size of the encoder and decoder LSTMs.

    Returns:
        A compiled tf.keras Model taking [encoder_input, decoder_input]
        (shapes (sequence_length, features) and (1, features)) and emitting
        a (1, 1) prediction; Adam/MSE with MAE tracked.
    """
    # Encoder: keep the full output sequence (for attention) and final states.
    enc_in = Input(shape=(sequence_length, features))
    enc_seq, enc_h, enc_c = LSTM(
        units, return_sequences=True, return_state=True, dropout=0.2
    )(enc_in)

    # Single-step decoder, seeded with the encoder's final hidden/cell state.
    dec_in = Input(shape=(1, features))
    dec_seq, _, _ = LSTM(
        units, return_sequences=True, return_state=True, dropout=0.2
    )(dec_in, initial_state=[enc_h, enc_c])

    # Attention: the decoder output queries the encoder output sequence.
    context = Attention()([dec_seq, enc_seq])

    # Fuse decoder state with the attended context, then project per step.
    fused = Concatenate(axis=-1)([dec_seq, context])
    hidden = TimeDistributed(Dense(units, activation='relu'))(fused)
    preds = TimeDistributed(Dense(1))(hidden)

    model = Model([enc_in, dec_in], preds)
    model.compile(optimizer=Adam(learning_rate=0.001), loss='mse', metrics=['mae'])
    return model


# Instantiate both architectures (construction order kept: layer init
# consumes the seeded RNG, so reordering would change initial weights).
model_attention_seq2seq = build_attention_seq2seq(sequence_length, features)
model_simple_seq2seq = build_simple_seq2seq(sequence_length, features)

# The attention model's decoder receives an all-zero "start token" per sample.
decoder_input_train = np.zeros((X_seq2seq_train.shape[0], 1, features))
decoder_input_test = np.zeros((X_seq2seq_test.shape[0], 1, features))

# Shared training configuration for a fair comparison of the two models.
fit_kwargs = dict(epochs=100, batch_size=32, verbose=1)

# Train the attention Seq2Seq (needs both encoder and decoder inputs).
print("训练带注意力机制的Seq2Seq模型...")
history_attention_seq2seq = model_attention_seq2seq.fit(
    [X_seq2seq_train, decoder_input_train],
    y_seq2seq_train,
    validation_data=([X_seq2seq_test, decoder_input_test], y_seq2seq_test),
    **fit_kwargs,
)

# Train the plain Seq2Seq baseline on the same split.
print("训练简单Seq2Seq模型...")
history_simple_seq2seq = model_simple_seq2seq.fit(
    X_seq2seq_train,
    y_seq2seq_train,
    validation_data=(X_seq2seq_test, y_seq2seq_test),
    **fit_kwargs,
)

# Predict on the held-out windows and map everything back to original units.
attention_seq2seq_predictions = scaler.inverse_transform(
    model_attention_seq2seq.predict([X_seq2seq_test, decoder_input_test]).reshape(-1, 1)
)
simple_seq2seq_predictions = scaler.inverse_transform(
    model_simple_seq2seq.predict(X_seq2seq_test).reshape(-1, 1)
)

# Ground truth on the same scale as the predictions.
y_test_actual = scaler.inverse_transform(y_seq2seq_test.reshape(-1, 1))

# NOTE(review): this figure is created but nothing is ever plotted on it in
# this file — either plotting code is missing or the call is dead; confirm.
plt.figure(figsize=(20, 15))


# 计算评估指标
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score


def evaluate_model(y_true, y_pred, model_name):
    """Print and return standard regression metrics for one model.

    Args:
        y_true: ground-truth values (array-like accepted by sklearn metrics).
        y_pred: predicted values, same shape as ``y_true``.
        model_name: label used in the printed report.

    Returns:
        Tuple ``(mse, mae, rmse, r2)`` in that order.
    """
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    r2 = r2_score(y_true, y_pred)

    # One print with embedded newlines produces the same stdout as four prints.
    print(f"\n{model_name} 评估指标:"
          f"\n均方误差 (MSE): {mse:.4f}"
          f"\n平均绝对误差 (MAE): {mae:.4f}"
          f"\n均方根误差 (RMSE): {rmse:.4f}"
          f"\n决定系数 (R²): {r2:.4f}")

    return mse, mae, rmse, r2


# Score both models on the identical test split.
print("=" * 50)
attention_metrics = evaluate_model(y_test_actual, attention_seq2seq_predictions, "带注意力机制的Seq2Seq模型")
simple_metrics = evaluate_model(y_test_actual, simple_seq2seq_predictions, "简单Seq2Seq模型")

# Relative improvement of the attention model over the baseline.
# Metric tuples are (mse, mae, rmse, r2); index 2 = RMSE, index 3 = R².
rmse_improvement = (simple_metrics[2] - attention_metrics[2]) / simple_metrics[2] * 100
r2_improvement = (attention_metrics[3] - simple_metrics[3]) * 100

print("\n" + "=" * 50)
print("模型性能比较:")
print(f"注意力模型 vs 简单模型 RMSE 改进: {rmse_improvement:.2f}%")
print(f"注意力模型 vs 简单模型 R² 改进: {r2_improvement:.2f}%")