import numpy as np
from keras import Input, Model
from keras.api.layers import Dense, Dropout, LSTM
from keras.api.models import Sequential
from keras.src.callbacks import EarlyStopping
from keras.src.layers import MultiHeadAttention, LayerNormalization, GlobalAveragePooling1D
from keras.src.optimizers import Adam


def lstm_prediction(train_seq, train_label, test_seq, test_label, upcoming_prediction):
    """Train a stacked-LSTM forecaster and fill ``upcoming_prediction`` in place.

    Args:
        train_seq: training windows; assumed shape (samples, timesteps, features)
            based on the ``shape[1]``/``shape[2]`` usage below.
        train_label: training targets aligned with ``train_seq``.
        test_seq: validation windows, same layout as ``train_seq``.
        test_label: validation targets.
        upcoming_prediction: pandas DataFrame whose last ``len(upcoming_prediction)``
            rows are overwritten with the recursive forecast (one row per step).

    Returns:
        The fitted Keras model (previous versions returned None implicitly;
        existing callers that ignore the return value are unaffected).
    """
    model = Sequential()
    # Keras 3 deprecates `input_shape=` on the first layer; declare it via Input.
    model.add(Input(shape=(train_seq.shape[1], train_seq.shape[2])))
    model.add(LSTM(units=50, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=50))
    model.add(Dropout(0.2))
    model.add(Dense(train_seq.shape[2]))  # one output per input feature
    model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mse'])
    model.fit(train_seq, train_label, epochs=100, batch_size=32,
              validation_data=(test_seq, test_label),
              callbacks=[EarlyStopping(monitor='val_loss', patience=10)], verbose=1)

    # Recursive multi-step forecast: start from the last known window and feed
    # each prediction back in as the newest timestep.
    curr_seq = test_seq[-1:]
    for i in range(-len(upcoming_prediction), 0):
        # verbose=0: avoid one progress bar per forecast step.
        up_pred = model.predict(curr_seq, verbose=0)  # shape (1, features)
        # Assign the explicit 1-D row instead of relying on pandas/numpy
        # squeezing the (1, features) array into a row slot.
        upcoming_prediction.iloc[i] = up_pred[0]
        # Slide the window: drop the oldest timestep, append the prediction.
        curr_seq = np.append(curr_seq[0][1:], up_pred, axis=0).reshape(test_seq[-1:].shape)
    return model


def transformer_model(train_seq, train_label, test_seq, test_label, upcoming_prediction):
    """Train a single-block Transformer-encoder regressor and fill
    ``upcoming_prediction`` in place with a recursive forecast.

    Args:
        train_seq: training windows; assumed shape (samples, timesteps, features)
            based on the ``shape[1]``/``shape[2]`` usage below.
        train_label: training targets aligned with ``train_seq``.
        test_seq: validation windows, same layout as ``train_seq``.
        test_label: validation targets.
        upcoming_prediction: pandas DataFrame whose last ``len(upcoming_prediction)``
            rows are overwritten with the recursive forecast (one row per step).

    Returns:
        The fitted Keras model (previous versions returned None implicitly;
        existing callers that ignore the return value are unaffected).
    """
    head_size = 64   # key/query dimension per attention head
    num_heads = 4
    ff_dim = 4       # hidden width of the position-wise feed-forward layer
    dropout = 0.1
    shape = (train_seq.shape[1], train_seq.shape[2])
    inputs = Input(shape=shape)

    # Multi-head self-attention (query and key/value are both the inputs).
    x = MultiHeadAttention(key_dim=head_size, num_heads=num_heads, dropout=dropout)(inputs, inputs)
    x = Dropout(dropout)(x)
    res = x + inputs  # residual connection

    # Position-wise feed-forward block.
    x = LayerNormalization(epsilon=1e-6)(res)
    x = Dense(ff_dim, activation="relu")(x)
    x = Dropout(dropout)(x)
    x = Dense(shape[-1])(x)  # project back to the feature width for the residual add
    x = x + res  # residual connection

    # Global average pooling over the time axis + output layer.
    x = GlobalAveragePooling1D()(x)
    outputs = Dense(train_seq.shape[2])(x)  # one output per input feature
    model = Model(inputs, outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse')
    model.summary()

    model.fit(
        train_seq, train_label,
        validation_data=(test_seq, test_label),
        epochs=200,
        batch_size=64,
        callbacks=[EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)],
        verbose=1
    )

    # Recursive multi-step forecast: start from the last known window and feed
    # each prediction back in as the newest timestep.
    curr_seq = test_seq[-1:]
    for i in range(-len(upcoming_prediction), 0):
        # verbose=0: avoid one progress bar per forecast step.
        up_pred = model.predict(curr_seq, verbose=0)  # shape (1, features)
        # Assign the explicit 1-D row instead of relying on pandas/numpy
        # squeezing the (1, features) array into a row slot.
        upcoming_prediction.iloc[i] = up_pred[0]
        # Slide the window: drop the oldest timestep, append the prediction.
        curr_seq = np.append(curr_seq[0][1:], up_pred, axis=0).reshape(test_seq[-1:].shape)
    return model