import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf


class NN(object):
    """LSTM-based forecaster for the time series in ``NN/scenic_data.csv``.

    Loads the CSV once at construction; :meth:`get_model` builds, trains,
    evaluates and saves a two-layer LSTM that predicts the first 18
    columns of the next time step from a sliding window of past rows.
    """

    def __init__(self):
        # NOTE(review): relative path — assumes the script is launched
        # from the project root. TODO confirm.
        self.df = pd.read_csv('NN/scenic_data.csv')

    def create_dataset(self, data, n_steps, n_outputs=18):
        """Build sliding-window samples from a 2-D time-ordered array.

        Args:
            data: 2-D array-like of shape (T, F), rows ordered by time.
            n_steps: window length; each input sample is ``n_steps``
                consecutive rows.
            n_outputs: number of leading columns used as the prediction
                target (default 18, matching the model's output layer;
                clipped to F when the data has fewer columns).

        Returns:
            Tuple ``(X, y)`` of numpy arrays with shapes
            ``(T - n_steps, n_steps, F)`` and
            ``(T - n_steps, min(n_outputs, F))``.
        """
        X, y = [], []
        for i in range(len(data) - n_steps):
            X.append(data[i:i + n_steps])
            # Target is the row immediately after the window,
            # restricted to the first n_outputs columns.
            y.append(data[i + n_steps, :n_outputs])
        return np.array(X), np.array(y)

    def get_model(self, n_steps=7, epochs=50):
        """Train, evaluate and persist the LSTM model.

        Args:
            n_steps: sliding-window length fed to the LSTM (default 7 —
                presumably one week of daily rows; TODO confirm the
                sampling rate).
            epochs: number of training epochs (default 50).
        """
        data = self.df.values
        X, y = self.create_dataset(data, n_steps)

        # Chronological 80/20 split — no shuffling, so the test set is
        # strictly later in time than the training data.
        train_size = int(len(X) * 0.8)
        X_train, X_test = X[:train_size], X[train_size:]
        y_train, y_test = y[:train_size], y[train_size:]

        # Two stacked LSTM layers; the Dense head predicts the first
        # 18 feature columns of the next time step.
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.LSTM(
            50, activation='relu', return_sequences=True,
            input_shape=(n_steps, X_train.shape[2])))
        model.add(tf.keras.layers.LSTM(50, activation='relu'))
        model.add(tf.keras.layers.Dense(18))
        model.compile(optimizer='adam', loss='mse')
        model.fit(X_train, y_train, epochs=epochs,
                  validation_data=(X_test, y_test))

        loss = model.evaluate(X_test, y_test)
        # Fixed message typo: 测试机 ("test machine") -> 测试集 ("test set"),
        # and dropped the redundant empty format spec.
        print(f"测试集损失:{loss}")

        tf.keras.models.save_model(model, 'NN/my_model.keras')
if __name__ == "__main__":
    # Script entry point: load the data, then train and save the model.
    network = NN()
    network.get_model()