import math
import os

import torch
import torch.nn as nn
import torch.optim as optim

from torch.utils.data import DataLoader

from DatasetInit import *
from SimpleRNN   import *


# Input data is one batch of shape (batch_size, seq_len, input_size).
seq_len = 16
num_samples = 38400  # number of samples
batch_size = 128

# Model hyperparameters
input_size = 5  # input feature dimension
hidden_size = 96  # hidden-layer dimension
output_size = 10  # output size per time step

# Learning-rate schedule bounds (cosine annealing) and epoch budget
initial_lr = 0.001
min_lr = 0.000001
total_epochs = 500


# Build the dataset instance (project-local class; presumably yields
# (input, output) pairs of shape (seq_len, input_size/output_size) — TODO confirm)
dataset = CustomDatasetTrain(num_samples,seq_len,input_size,output_size)

# Wrap it in a shuffling DataLoader
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

print("data ok")


def cosine_annealing_lr(initial_lr, min_lr, epoch, total_epochs):
    """Return the cosine-annealed learning rate for *epoch*.

    Decays smoothly from ``initial_lr`` at epoch 0 down to ``min_lr`` at
    ``epoch == total_epochs`` along half a cosine wave.
    """
    progress = epoch / total_epochs
    cosine_factor = 0.5 * (1.0 + math.cos(math.pi * progress))
    return min_lr + (initial_lr - min_lr) * cosine_factor

def set_lr(optimizer, new_lr):
    """Assign ``new_lr`` as the learning rate of every param group of *optimizer*."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr


# Build the model and move it to the GPU(s).
model = SimpleRNN(input_size, hidden_size, output_size)

# NOTE(review): DataParallel wraps the network (the real module lives at
# model.module) and .cuda() assumes at least one CUDA device is available.
model = nn.DataParallel(model)
model = model.cuda()

# Mean-squared-error loss over the per-step outputs.
criterion = nn.MSELoss()
# Start Adam at initial_lr instead of a hard-coded 0.001: the literal
# duplicated the constant defined above and could silently drift from it.
optimizer = torch.optim.Adam(model.parameters(), lr=initial_lr)


## Training loop
for epoch in range(total_epochs):

    # Apply the cosine-annealing schedule once per epoch. Previously this
    # only ran inside the `(epoch+1) % 50 == 0` logging branch, so the LR
    # stayed at its initial value for 49 out of every 50 epochs.
    new_lr = cosine_annealing_lr(initial_lr, min_lr, epoch, total_epochs)
    set_lr(optimizer, new_lr)

    model.train()

    epoch_loss = 0.0   # running sum of per-batch losses for logging
    num_batches = 0

    # Iterate over mini-batches.
    for batch_inputs, batch_outputs in dataloader:

        # Use -1 for the batch dimension: hard-coding batch_size would crash
        # on a final partial batch whenever num_samples % batch_size != 0.
        batch_inputs = torch.reshape(batch_inputs, [-1, seq_len, input_size])
        batch_outputs = torch.reshape(batch_outputs, [-1, seq_len, output_size])

        batch_inputs = batch_inputs.cuda()
        batch_outputs = batch_outputs.cuda()

        optimizer.zero_grad()
        outputs_pred = model(batch_inputs)

        # Accumulate the MSE at every time step.
        total_loss = 0
        for t in range(seq_len):
            loss = criterion(outputs_pred[:, t, :], batch_outputs[:, t, :])
            total_loss += loss

        # Backpropagate and update the weights.
        total_loss.backward()
        optimizer.step()

        epoch_loss += total_loss.item()
        num_batches += 1

    if (epoch + 1) % 50 == 0:
        # Report the epoch-average loss (the old code printed only the last
        # batch's loss) and use consistent 1-based epoch numbering.
        print(f"Epoch {epoch + 1}, Loss: {epoch_loss / max(num_batches, 1)}")
        print(f"Epoch {epoch + 1}: Learning Rate = {new_lr:.8f}")

        # Ensure the target directory exists before saving.
        os.makedirs("model", exist_ok=True)
        # NOTE(review): this pickles the whole DataParallel wrapper; loading
        # needs the same class definitions. Consider saving
        # model.module.state_dict() instead — confirm with the loading code.
        torch.save(model, "model/full_model.pth")
        print("Full model saved.")
