import numpy as np
import pandas as pd
import tensorflow as tf
from joblib import dump, load
from sklearn.preprocessing import StandardScaler

class NN(object):
    """Inference wrapper around a trained LSTM model and its fitted scaler.

    Loads the Keras model and the StandardScaler that were persisted by the
    training script, and predicts the next value from the latest window of
    scenic data.
    """

    def __init__(self):
        # Load the trained model and the scaler fitted during training.
        # Both files must exist under NN/ before instantiating this class.
        self.loaded_model = tf.keras.models.load_model('NN/my_model.keras')
        self.scaler = load('NN/scaler.joblib')

    def get_hourly_trend(self):
        """Predict the next value from the most recent ``n_steps`` days.

        Returns:
            The raw model output (still in standardized units — presumably
            the scaled first column; apply the scaler's inverse transform on
            column 0 if an unscaled value is needed — TODO confirm with caller).
        """
        n_steps = 7  # window length: seven days
        df_pivot = pd.read_csv('./NN/scenic_data.csv')
        latest_data = df_pivot.iloc[-n_steps:].values  # last seven days of data

        # Bug fix: the model was trained on standardized features, so the raw
        # window must be transformed with the same fitted scaler before
        # prediction. Previously self.scaler was loaded but never applied.
        latest_data = self.scaler.transform(latest_data)
        latest_data = latest_data.reshape(1, n_steps, latest_data.shape[1])

        predicted = self.loaded_model.predict(latest_data)
        return predicted

def train_model():
    """Build, train, evaluate, and persist a two-layer LSTM regressor.

    Reads the module-level arrays ``X_train``/``y_train``/``X_test``/``y_test``
    prepared in ``__main__``. Saves the model to ``NN/my_model.keras``.

    Returns:
        The trained ``tf.keras`` model (previously nothing was returned;
        callers ignoring the return value are unaffected).
    """
    model = tf.keras.models.Sequential()
    # Derive the input shape from the training data itself rather than the
    # module-level df_pivot, so the layer always matches what fit() receives:
    # X_train has shape (samples, n_steps, n_features).
    model.add(tf.keras.layers.LSTM(
        50, activation='relu', return_sequences=True,
        input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(tf.keras.layers.LSTM(50, activation='relu'))
    model.add(tf.keras.layers.Dense(1))
    model.compile(optimizer='adam', loss='mse')
    model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test))

    # Evaluate on the hold-out split.
    loss = model.evaluate(X_test, y_test)
    print(f"测试集损失: {loss:}")

    # Persist the model for later inference by the NN class.
    tf.keras.models.save_model(model, 'NN/my_model.keras')
    return model

if __name__ == '__main__':
    n_steps = 7  # window length: seven days
    df_pivot = pd.read_csv('./NN/scenic_data.csv')
    df_pivot = df_pivot.values

    # Standardize all features. The same fitted scaler must be reused at
    # inference time, so persist it alongside the model.
    scaler = StandardScaler()
    df_pivot_scaled = scaler.fit_transform(df_pivot)
    # Bug fix: NN.__init__ loads 'NN/scaler.joblib', but the scaler was
    # never saved anywhere — inference would crash with FileNotFoundError.
    dump(scaler, 'NN/scaler.joblib')

    # Build sliding windows: each X sample is n_steps consecutive rows;
    # the target y is the first column of the row right after the window.
    X = []
    y = []
    for i in range(len(df_pivot_scaled) - n_steps):
        X.append(df_pivot_scaled[i:(i + n_steps), :])
        y.append(df_pivot_scaled[i + n_steps, 0])  # assume column 0 is the target

    X = np.array(X)
    y = np.array(y)

    # 80/20 chronological train/test split.
    split = int(0.8 * len(X))
    X_train = X[:split]
    y_train = y[:split]
    X_test = X[split:]
    # Bug fix: the original read y[int(0.8 * len(y))] (a single scalar,
    # missing the slice colon), which broke evaluate()/validation_data.
    y_test = y[split:]

    train_model()

    n = NN()
    # Bug fix: NN has no get_model(); the prediction method is get_hourly_trend().
    n.get_hourly_trend()