import os, sys
sys.path.append("..")
sys.path.append(os.getcwd())

import pandas as pd
import paddle
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import data_process.species_data_process as sp
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import GRU
from keras.layers import Dense

from model.GRU_Model import GRUModel
from model.MyGRU_Model import MyGRUModel

from functools import partial

# The five species to model and describe
species_list = ['Plankton', 'Small Fish', 'Medium Fish', 'Big Fish', 'Squid']
# species_list = ['Plankton']  # debug: single-species run
data = {}   # species name -> raw DataFrame loaded from CSV
df={}       # species name -> processed DataFrame (time / quantity change)

# Select device, CPU or GPU
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = 'cpu'

# Hyperparameters
# NOTE(review): the training loop below redefines learning_rate/batch_size
# and hard-codes epochs=5, so several of these values are shadowed or
# unused there — consider consolidating.
epochs = 5  # number of training epochs
batch_size = 72
input_dim = 1  # input feature dimension
hidden_dim = 32  # hidden layer dimension
output_dim = 1  # output dimension: predicted quantity change
lr = 0.001   # learning rate

# Train a GRU forecaster on each species' quantity-change series and plot
# loss curves plus predicted-vs-true values.
for species in species_list:
    # Load the per-species CSV. Portable path join replaces the original
    # hard-coded Windows backslash path ('data_resource\species\...'),
    # whose '\s' is an invalid escape sequence and breaks on POSIX.
    data[species] = pd.read_csv(
        os.path.join('data_resource', 'species', species + '_data.csv'))
    # Parse the timestamp column into datetimes.
    data[species]['Timestamp'] = pd.to_datetime(data[species]['Timestamp'])
    # Earliest date serves as the baseline for relative day counts.
    baseline_date = data[species]['Timestamp'].min()

    # Working frame: time + quantity change, plus days since baseline.
    df[species] = pd.DataFrame({
        'Time': data[species]['Timestamp'],
        'Quantity_Changed': data[species]['Quantity_Changed'],
    })
    df[species]['Days_From_Baseline'] = (df[species]['Time'] - baseline_date).dt.days
    values = df[species]['Quantity_Changed']

    # Scale/reframe the series into supervised-learning form; returns the
    # reframed frame and the fitted scaler used later to invert scaling.
    reframed, scaler = sp.species_data_process(values)

    # Prepare the numeric matrix for modeling.
    values = reframed.values
    print(values)

    # Chronological split — never shuffle a time series.
    train, test = train_test_split(values, test_size=0.4, shuffle=False)

    # Last column is the target; the preceding columns are the inputs.
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape input to 3D [samples, timesteps, features] as the GRU expects.
    train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
    test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
    print('train_x.shape, train_y.shape, test_x.shape, test_y.shape')
    print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

    # Hyperparameters for this model.
    num_units = 32        # units per GRU layer
    num_layers = 2        # number of stacked GRU layers
    learning_rate = 0.0001
    batch_size = 72       # batch size for fitting

    # Build the stacked GRU. Intermediate layers must return full
    # sequences for stacking, but the LAST GRU layer returns only its
    # final state so the Dense head emits one value per sample
    # (shape (n, 1)) rather than a per-timestep 3-D output — the
    # original set return_sequences=True on every layer.
    model_gru = Sequential()
    model_gru.add(GRU(units=num_units,
                      input_shape=(train_X.shape[1], train_X.shape[2]),
                      return_sequences=num_layers > 1))
    for layer_idx in range(1, num_layers):
        model_gru.add(GRU(units=num_units,
                          return_sequences=layer_idx < num_layers - 1))
    model_gru.add(Dense(units=1))

    # Compile with MAE loss. The `lr` keyword was removed from Keras
    # optimizers; the supported name is `learning_rate`.
    optimizer = Adam(learning_rate=learning_rate)
    model_gru.compile(loss='mae', optimizer=optimizer)

    # Fit without shuffling to preserve temporal order.
    history = model_gru.fit(train_X, train_y, epochs=5, batch_size=batch_size,
                            validation_data=(test_X, test_y), verbose=2,
                            shuffle=False)

    # Plot training vs. validation loss.
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    plt.show()

    # Predict once. The original called predict twice on identically
    # shaped input and performed a redundant reshape whose result was
    # never used — same output, double the work.
    yhat = model_gru.predict(test_X)

    # Invert scaling for the forecast (scaler expects a 2-D column).
    inv_yhat = scaler.inverse_transform(yhat.reshape(-1, 1))[:, 0]

    # Invert scaling for the ground truth.
    inv_y = scaler.inverse_transform(test_y.reshape(-1, 1))[:, 0]

    # Plot prediction against the true series.
    plt.figure(figsize=(10, 6))
    plt.plot(inv_yhat, label='prediction')
    plt.plot(inv_y, label='true')
    plt.title(f'GRU Prediction for {species}')
    plt.legend()
    plt.show()