import torch
import torch.nn as nn
import torch.nn.functional as F



'''
最简单的    3个 时间步数   2个隐藏层

'''

# input_size = 10
# hidden_size = 20
# num_layers = 2
# batch_first = True

# rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=batch_first)
# # rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=False)

# h0 = torch.zeros(num_layers, 5, hidden_size)
# x=torch.randn(5, 3, input_size)  # batch_size=1, seq_len=3, input_size=10
# output,h = rnn(x,h0)  
# print(output.shape)  # torch.Size([5, 3, 20])  batch_size=5, seq_len=3, hidden_size=20
# print(h.shape)  # torch.Size([2, 5, 20])      # num_layers=2, batch_size=5, hidden_size=20



# print(output)
# print(h)



'''
人名分类中，使用1个单词  多个时间序列进行训练，每次传递一个单词  1个单词的向量维度是57 batch_size=1  


'''

# input_size = 57
# hidden_size = 128
# num_layers = 1
# batch_first = True
# batch_size = 1

# seq_len = 1

# rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=batch_first)

# h0 = torch.zeros(num_layers, batch_size, hidden_size)
# x=torch.randn(batch_size, seq_len, input_size) 

# # 假设是2个字母  ab   每个字母是57维度词向量
# inputs = torch.randn(2, 1, 57)  # batch_size=1, seq_len=1, input_size=57

# # 分2次训练
# for i,input_i in enumerate(inputs[0]):
#     x = inputs[i]  # 1,57
#     x = x.unsqueeze(0)  # (1,1,57)
#     output,h0 = rnn(x,h0) # h0 输出的是 (num_layers, batch_size, hidden_size)  (1,1,128) 在输入到下一个rnn时需要传入



# # output,h = rnn(x,h0)  
# print(output.shape)     # （1,1,128）
# print(h0.shape)         # （1,1,128）

# print(output)
# print(h0)



'''
把上面的封装成一个模型

每个RNN 有1个隐藏层  输入单词是1个字母  57维度，输出是18维度 h0、output都是（1,1,128）  18维度的输出是经过softmax处理的
每次输入一个字母  1个字母的向量维度是57 batch_size=1



'''






class RNNModel(nn.Module):
    """Single-step character classifier: RNN -> Linear -> LogSoftmax.

    Built for the name-classification example: each forward call typically
    consumes one time step (e.g. one 57-dim letter vector) and the caller
    threads the returned hidden state into the next call.
    """

    def __init__(self, input_size, hidden_size, num_layers, batch_first=True,
                 output_size=18):
        '''
        input_size: feature dimension of each input step (e.g. 57)
        hidden_size: feature dimension of the hidden state (e.g. 128)
        num_layers: number of stacked RNN layers
        batch_first: if True, inputs are (batch, seq, feature)
        output_size: number of target classes (default 18, as in the tutorial)
        '''

        super(RNNModel, self).__init__()
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=batch_first)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first

        # Linear layer projecting hidden features to per-class scores.
        self.fc = nn.Linear(hidden_size, output_size)

        # LogSoftmax pairs with the nn.NLLLoss used by the training loops below.
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x, h0=None):
        '''
        x: input of shape (batch, seq, input_size) when batch_first=True
        h0: optional initial hidden state (num_layers, batch, hidden_size);
            a zero state is created when omitted.
        Returns (log_probs, h_n): log_probs has shape (batch, seq, output_size),
        h_n is the final hidden state to thread into the next call.
        '''
        if h0 is None:
            # Allocate the initial hidden state on the same device/dtype as the
            # input so the model also works on GPU.  NOTE(review): x.size(0) is
            # read as the batch size, which assumes batch_first=True.
            h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                             device=x.device, dtype=x.dtype)

        output, hn = self.rnn(x, h0)
        output = self.softmax(self.fc(output))  # log-probabilities over classes
        return output, hn


# # quick shape sanity check
# x = torch.randn(1, 1, 57)  # batch_size=1, seq_len=1, input_size=57
model = RNNModel(input_size=57, hidden_size=128, num_layers=1, batch_first=True)
# output, h0 = model(x)
# print(output.shape)  # torch.Size([1, 1, 18])   (batch, seq, num_classes)
# print(h0.shape)  # torch.Size([1, 1, 128])      (num_layers, batch, hidden_size)



# Training setup: the train()/train_more() loops below drive this shared instance.
model.zero_grad() # clear gradients
model.train()

# for d in model.parameters():
#     print(d.shape)  # print parameter shapes
#     print('模型当前的梯度',d.grad)  # print current parameter gradients
#     # d.grad = None  # clear gradients
#     # d.grad = torch.zeros_like(d)  # clear gradients


# NLLLoss expects log-probabilities, matching the model's LogSoftmax output.
criterion = nn.NLLLoss()



def giv_input_target():
    """Produce one synthetic training sample.

    Returns a (sequence, target) pair: a random 10-step sequence of shape
    (10, 1, 57) — seq_len=10, batch_size=1, input_size=57 — and a fixed
    dummy class index 12 as a 1-element LongTensor.
    """
    sequence = torch.randn(10, 1, 57)
    target = torch.tensor([12])
    return sequence, target

def train():
    """Run one manual training step on a single synthetic sample.

    Feeds a 10-step random sequence through the module-level `model` one
    time step at a time (threading the hidden state), computes NLL loss on
    the final step's output, backpropagates, and applies a hand-rolled SGD
    update with lr=0.01, printing parameters and gradients along the way.
    Relies on the module-level `model` and `criterion`.
    """
    # Synthetic sample: 10 steps, batch_size=1, 57-dim features; fixed target 12.
    xx = torch.randn(10, 1, 57)
    yy = torch.tensor([12])
    h0 = torch.zeros(1, 1, 128)  # (num_layers, batch, hidden_size)

    model.zero_grad()  # clear stale gradients before backward()
    output = None
    for i in range(xx.size(0)):
        print(f'第{i}次执行')
        x = xx[i]
        x = x.unsqueeze(0)  # one time step as (1, 1, 57)

        output, h0 = model(x, h0)

    # Only the final step's output is scored against the target class.
    loss = criterion(output.squeeze(0), yy)
    loss.backward()
    print('loss', loss)  # print the loss

    # Manual SGD: p <- p - 0.01 * grad.  The positional add_(-0.01, grad)
    # overload is deprecated (removed in modern PyTorch); use alpha= instead.
    for d in model.parameters():
        print('更新前的参数', d.data)      # parameter before the update
        print('模型当前的梯度', d.grad.data)  # current gradient
        d.data.add_(d.grad.data, alpha=-0.01)  # in-place update
        print('减去的梯度值', d.grad.data * -0.01)  # the delta subtracted
        print('更新后的参数', d.data)      # parameter after the update

        # Example of one update taken from an actual run:
        #   0.0824 - (-0.0006) = 0.0830 for the first element, and so on
        #   element-wise across the parameter tensor.


def train_more():
    """Train on 500 synthetic samples with a manual SGD update.

    For each sample: reset the hidden state, run the 10-step sequence
    through `model` one time step at a time, backprop the NLL loss of the
    final step, and update every parameter in place (lr=0.01).  Prints the
    per-sample loss and the final average loss.
    Relies on the module-level `model`, `criterion`, and `giv_input_target`.
    """
    all_loss = 0
    train_loader = 500  # number of synthetic samples to process
    for step in range(train_loader):
        xx, yy = giv_input_target()
        h0 = torch.zeros(1, 1, 128)  # fresh hidden state per sample
        model.zero_grad()  # gradients accumulate across backward() otherwise
        for i in range(xx.size(0)):
            x = xx[i]
            x = x.unsqueeze(0)  # one time step as (1, 1, 57)
            output, h0 = model(x, h0)

        # Score only the final step's output against the target class.
        loss = criterion(output.squeeze(0), yy)
        loss.backward()
        print('loss', loss)  # print the loss

        # Manual SGD step; the positional add_(-0.01, grad) overload is
        # deprecated (removed in modern PyTorch) — use the alpha keyword.
        for d in model.parameters():
            d.data.add_(d.grad.data, alpha=-0.01)
        all_loss += loss.item()
        print('------------------第{}次训练 loss{}------------------'.format(step, loss))

    avg_loss = all_loss / train_loader
    print('平均损失', avg_loss)  # average loss over all samples


# Entry point: run the 500-sample training loop when the script executes.
train_more()
# print(giv_input_target())