import os

from joblib import dump, load
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import tensorflow as tf

class NN(object):
    """LSTM regressor over sliding windows of daily scenic-spot data.

    Loads the historical CSV, builds masked-window supervised samples, trains
    a two-layer LSTM, and persists the model plus the feature scaler.
    """

    def __init__(self):
        # Historical time series; must contain a numeric 'count' column
        # (daily visitor count) — TODO confirm schema against the CSV.
        self.df = pd.read_csv('./timing/scenic_data.csv')

    def create_dataset(self, n_strps):
        """Build supervised samples from the time series.

        Each sample X[i] is a window of `n_strps` consecutive rows whose
        final-row 'count' is masked to 0; the label y[i] is that masked
        final-row 'count' value.

        Args:
            n_strps: window length in rows (days).

        Returns:
            (X, y): X of shape (len(df) - n_strps, n_strps, n_cols),
            y of shape (len(df) - n_strps,).
        """
        X, y = [], []
        count_col = self.df.columns.get_loc('count')
        for i in range(len(self.df) - n_strps):
            # .copy() is essential: an iloc slice may be a view of self.df,
            # and masking below would otherwise zero the source data (and
            # corrupt the labels read from self.df on later iterations).
            window = self.df.iloc[i:i + n_strps].copy()
            # Hide the target value so the model cannot see it in the input.
            window.iloc[-1, count_col] = 0
            X.append(window)
            y.append(self.df['count'][i + n_strps - 1])
        return np.array(X), np.array(y)

    def get_model(self):
        """Train the LSTM on scaled windows and save model + scaler to NN/."""
        n_steps = 7  # window length: seven days
        X, y = self.create_dataset(n_steps)

        # Chronological 80/20 split — no shuffling for time-series data.
        train_size = int(len(X) * 0.8)
        X_train, X_test = X[:train_size], X[train_size:]
        y_train, y_test = y[:train_size], y[train_size:]

        # Fit the scaler on the training windows and apply it to BOTH splits.
        # Previously the scaler was fitted but never used, so the persisted
        # scaler did not match the data the model was actually trained on.
        scaler = StandardScaler()
        flat_train = X_train.reshape(X_train.shape[0], -1)
        X_train = scaler.fit_transform(flat_train).reshape(X_train.shape)
        flat_test = X_test.reshape(X_test.shape[0], -1)
        X_test = scaler.transform(flat_test).reshape(X_test.shape)

        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.LSTM(50, activation='relu', return_sequences=True,
                                       input_shape=(n_steps, X_train.shape[2])))
        model.add(tf.keras.layers.LSTM(50, activation='relu'))
        model.add(tf.keras.layers.Dense(1))
        model.compile(optimizer='adam', loss='mse')
        model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test))

        # Evaluate on the held-out split.
        loss = model.evaluate(X_test, y_test)
        print(f"测试集损失: {loss:}")

        # Ensure the output directory exists before persisting artifacts.
        os.makedirs('NN', exist_ok=True)
        tf.keras.models.save_model(model, 'NN/my_model.keras')
        dump(scaler, 'NN/scaler.joblib')

if __name__ == '__main__':
    # Script entry point: load the data, train the model, save artifacts.
    NN().get_model()