import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import tensorflow as tf


class NN(object):
    """LSTM-based forecaster for the daily visitor 'count' series in scenic_data.csv."""

    def __init__(self):
        # Load the raw data; assumes the CSV has a 'count' column holding the target.
        self.df = pd.read_csv('第七次实验/scenic_data.csv')

    def create_dataset(self, df, n_steps):
        """Build sliding-window samples and labels from *df*.

        Each sample ``X[k]`` is the window of rows ``k .. k+n_steps-1`` with the
        'count' value of the *final* row masked to 0, and ``y[k]`` is that masked
        count — i.e. the model predicts the last day's count from the window.

        Returns:
            (X, y): numpy arrays of shapes
            ``(len(df)-n_steps, n_steps, n_features)`` and ``(len(df)-n_steps,)``.
        """
        X, y = [], []
        count_col = df.columns.get_loc('count')  # hoisted: loop-invariant
        for i in range(len(df) - n_steps):
            # Copy the slice so masking never mutates the caller's frame
            # (a plain slice may share its buffer with *df*).
            window = df.iloc[i:i + n_steps].copy()
            # BUGFIX: the original wrote `... - 0`, a no-op expression whose
            # result was discarded; the target must actually be zeroed here
            # so the label is not leaked into the features.
            window.iloc[-1, count_col] = 0
            X.append(window)
            # Label = the count that was just masked out of the window
            # (.iloc: positional, equivalent on the default RangeIndex).
            y.append(df['count'].iloc[i + n_steps - 1])
        return np.array(X), np.array(y)

    def get_model(self):
        """Train a stacked LSTM on the windowed data, evaluate, save and return it."""
        n_steps = 7  # window length: 7 days
        X, y = self.create_dataset(self.df, n_steps)

        # Chronological 80/20 split — no shuffling for time-series data.
        train_size = int(len(X) * 0.8)
        X_train, X_test = X[:train_size], X[train_size:]
        y_train, y_test = y[:train_size], y[train_size:]

        # Two stacked LSTM layers; the first returns full sequences to feed the second.
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.LSTM(50, activation='relu', return_sequences=True,
                                       input_shape=(n_steps, X_train.shape[2])))
        model.add(tf.keras.layers.LSTM(50, activation='relu'))
        model.add(tf.keras.layers.Dense(1))
        model.compile(optimizer='adam', loss='mse')
        model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test))

        # Evaluate on the held-out tail of the series.
        loss = model.evaluate(X_test, y_test)
        print(f'测试集损失：{loss:}')
        # Persist the trained model.
        tf.keras.models.save_model(model, '第七次实验/model.keras')
        # BUGFIX: return the model so callers (see __main__) actually receive it.
        return model
  


if __name__ == "__main__":
    nn = NN()
    model = nn.get_model()