import os, sys
sys.path.append("..")
sys.path.append(os.getcwd())

import pandas as pd
import paddle
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import data_process.species_data_process as sp
from model.GRU_Model import GRUModel
from model.MyGRU_Model import MyGRUModel

from functools import partial

# The five species whose population dynamics are modeled.
species_list = ['Plankton', 'Small Fish', 'Medium Fish', 'Big Fish', 'Squid']

# Per-species raw CSV frames and derived working DataFrames, keyed by species name.
data = {}
df = {}

# Compute device (the GPU/torch path is currently disabled).
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = 'cpu'

# Training hyper-parameters.
epochs = 5        # number of training epochs
batch_size = 72   # mini-batch size
input_dim = 1     # input feature dimension
hidden_dim = 32   # hidden-layer width
output_dim = 1    # output dimension: predicted quantity change
lr = 0.001        # learning rate

# # Build and instantiate the GRU model (disabled)
# model = paddle.Model(GRUModel)
# model.prepare(paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters()),
#               paddle.nn.CrossEntropyLoss(),
#               paddle.metric.Accuracy())
# model = GRUModel(input_dim, hidden_dim, output_dim)
# model.to(device)

# Define the loss function and optimizer.
# NOTE(review): this module-level loss_fn is re-assigned inside the per-species
# loop below and is never used directly at module level.
loss_fn = paddle.nn.CrossEntropyLoss()
# criterion = nn.MSELoss()
# optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# For each species: load its CSV, derive time-based features, preprocess,
# split chronologically, and instantiate a per-species GRU model.
for species in species_list:
    # Load the species CSV. os.path.join replaces the original hard-coded
    # backslash path ('data_resource\species\\...'), whose '\s' is an invalid
    # escape sequence (SyntaxWarning on modern CPython) and which fails on
    # non-Windows systems.
    csv_path = os.path.join('data_resource', 'species', species + '_data.csv')
    data[species] = pd.read_csv(csv_path)

    # Parse the timestamp column; the earliest date becomes the baseline.
    data[species]['Timestamp'] = pd.to_datetime(data[species]['Timestamp'])
    baseline_date = data[species]['Timestamp'].min()

    # Working DataFrame with time and quantity-change columns.
    df[species] = pd.DataFrame({'Time': data[species]['Timestamp'],
                                'Quantity_Changed': data[species]['Quantity_Changed']})
    # Days elapsed since the baseline date.
    df[species]['Days_From_Baseline'] = (df[species]['Time'] - baseline_date).dt.days
    values = df[species]['Quantity_Changed']

    # Project-specific preprocessing (scaling + supervised reframing).
    # NOTE(review): assumes sp.species_data_process returns (DataFrame, scaler)
    # — confirm against data_process.species_data_process.
    reframed, scaler = sp.species_data_process(values)

    # Prepare the prediction data as a NumPy array.
    values = reframed.values
    print(values)

    # Chronological 80/20 train/test split (shuffle=False preserves time order).
    train, test = train_test_split(values, test_size=0.2, shuffle=False)

    # Last column is the target; the remaining columns are input features.
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape inputs to 3D [samples, timesteps, features] for the recurrent model.
    train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
    test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
    print('train_x.shape, train_y.shape, test_x.shape, test_y.shape')
    print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

    species_size = len(df[species])
    # NOTE(review): num_classes is set to the number of training rows, which is
    # unusual for a classification head — confirm against MyGRUModel's API.
    num_classes = len(train)
    seq_len = len(df[species])

    # Instantiate the per-species GRU model and put it in training mode.
    model = MyGRUModel(species_size,
                       num_classes,
                       direction='forward',
                       pooling_type='max')
    model.train()
    # Optimizer and loss function for this species' model.
    optim = paddle.optimizer.Adam(parameters=model.parameters())
    loss_fn = paddle.nn.CrossEntropyLoss()

    # TODO(review): the actual training/evaluation/plotting loop was entirely
    # commented out in the original file — the model is instantiated here but
    # never trained or evaluated.
# print(data)

