import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import tensorflow as tf

class NN(object):
    """LSTM forecaster for scenic-spot time-series data loaded from a CSV file."""

    def __init__(self):
        # NOTE(review): path is relative to the current working directory —
        # confirm the script is always launched from the repo root.
        self.df = pd.read_csv('./NN/scenic_data.csv')

    def create_dataset(self, data, n_strps):
        """Build sliding-window supervised samples from a 2-D time series.

        Args:
            data: 2-D array-like of shape (timesteps, features).
            n_strps: window length — number of past rows per sample.

        Returns:
            Tuple (X, y) where X has shape (len(data) - n_strps, n_strps,
            features) and y is the row immediately following each window,
            truncated to its first 18 columns (the forecast targets).
        """
        X, y = [], []
        for i in range(len(data) - n_strps):
            X.append(data[i:i + n_strps])
            y.append(data[i + n_strps, :18])
        # BUG FIX: return moved OUT of the loop body — the original returned
        # after the first iteration, producing a single-sample dataset.
        return np.array(X), np.array(y)

    def get_model(self):
        """Build, train, evaluate and save a 2-layer LSTM regression model."""
        n_steps = 7
        data = self.df.values
        X, y = self.create_dataset(data, n_steps)

        # Chronological 80/20 train/test split (no shuffling for time series).
        train_size = int(len(X) * 0.8)
        X_train, X_test = X[:train_size], X[train_size:]
        y_train, y_test = y[:train_size], y[train_size:]

        n_features = X.shape[2]
        model = tf.keras.models.Sequential()
        # BUG FIX: activation 'rule' -> 'relu' (invalid identifier would raise),
        # and input_shape must be the (timesteps, features) tuple — the original
        # passed the bare int (n_steps), since (n_steps) is not a tuple.
        model.add(tf.keras.layers.LSTM(50, activation='relu',
                                       return_sequences=True,
                                       input_shape=(n_steps, n_features)))
        model.add(tf.keras.layers.LSTM(50, activation='relu'))
        model.add(tf.keras.layers.Dense(18))
        model.compile(optimizer='adam', loss='mse')
        model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test))

        # Evaluate on the held-out tail. BUG FIX: 'evalute' -> 'evaluate'.
        loss = model.evaluate(X_test, y_test)
        print(f"测试集损失: {loss:}")

        # Persist the trained model. BUG FIX: tf.keras.model.save_model does not
        # exist — the correct namespace is tf.keras.models.save_model.
        tf.keras.models.save_model(model, 'NN/my_model.keras')

if __name__ == '__main__':
    # Script entry point: load the CSV, then train and persist the model.
    trainer = NN()
    trainer.get_model()