import torch
import paddle.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import paddle
import paddle.nn as nn


# Select the compute device once at module import: CUDA GPU when
# available, otherwise CPU. Tensors/modules are moved here explicitly.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# class GRUModel(paddle.nn.Layer):
#     def __init__(self, input_size, hidden_size, output_size, num_layers=1):
#         super(GRUModel, self).__init__()
#         self.hidden_size = hidden_size
#         self.num_layers = num_layers

#         self.gru = paddle.nn.GRU(input_size, hidden_size, num_layers)
#         self.fc1 = paddle.nn.Linear(hidden_size, 96)
#         self.fc2 = paddle.nn.Linear(96, output_size)

#         # 初始化 Linear 层
#         self.fc1.weight.set_value(paddle.nn.initializer.XavierUniform()([hidden_size, 96]))
#         self.fc1.bias.set_value(paddle.zeros([96]))

#         self.fc2.weight.set_value(paddle.nn.initializer.XavierUniform()([96, output_size]))
#         self.fc2.bias.set_value(paddle.zeros([output_size]))

#     def forward(self, x):
#         batch_size = x.shape[0]
#         hidden = self.init_hidden(batch_size)

#         out, _ = self.gru(x, hidden)
#         out = self.fc1(out[:, -1, :])
#         out = paddle.tanh(out)
#         out = self.fc2(out)
#         return out

#     def init_hidden(self, batch_size):
#         return paddle.zeros([self.num_layers, batch_size, self.hidden_size], dtype='float32')

# 定义GRU模型
# class GRUModel(torch.nn.Module):
#     def __init__(self, input_dim, hidden_dim, output_dim):
#         super(GRUModel, self).__init__()
#         self.hidden_dim = hidden_dim
#         self.gru = nn.GRU(input_dim, hidden_dim)  # 不使用 batch_first=True
#         self.fc = nn.Linear(hidden_dim, output_dim)
#         # 添加一个可训练的线性层
#         self.linear = nn.Linear(10, 10)  # 例如添加一个具有10个输入和10个输出的线性层

#     def forward(self, x):
#         h0 = torch.zeros(1, x.size(0), self.hidden_dim).to(device)  # 初始化隐藏层状态
#         out, _ = self.gru(x.transpose(1, 0), h0)  # 调整输入数据维度
#         out = self.fc(out[-1, :, :])  # 取GRU序列的最后一个输出
#         out = self.linear(out)  # 使用添加的线性层
#         return out


# 定义GRU模型
# GRU model definition
class GRUModel(torch.nn.Module):
    """Single-layer GRU regressor over batch-first sequences.

    Input:  float tensor of shape (batch, seq_len, input_dim).
    Output: float tensor of shape (batch, output_dim), computed from the
            hidden state at the last time step of each sequence.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(GRUModel, self).__init__()
        self.hidden_dim = hidden_dim
        # Use torch.nn explicitly: the module-level alias `nn` is bound to
        # paddle.nn (see the imports), and a Paddle layer inside a torch
        # Module would neither register parameters nor accept torch tensors.
        # batch_first=True matches forward(), which treats dim 0 of x and
        # out as the batch dimension.
        self.gru = torch.nn.GRU(input_dim, hidden_dim, batch_first=True)
        self.fc = torch.nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Initial hidden state, shape (num_layers=1, batch, hidden_dim).
        # Allocated on x's device so the model does not depend on the
        # module-level `device` global.
        h0 = torch.zeros(1, x.size(0), self.hidden_dim, device=x.device)
        out, _ = self.gru(x, h0)
        # Take the GRU output at the final time step: (batch, hidden_dim).
        out = self.fc(out[:, -1, :])
        return out