import torch
from torch.nn import functional as F
from torch import nn
from torch.nn import LSTM

from decoder import LSTMd


def encoder_test():
    """Smoke-test the project decoder (LSTMd) on a tiny padded batch."""
    embedding = nn.Embedding(5, 3)  # vocab size 5, embedding dim 3
    decoder = LSTMd(3, 4)           # presumably input dim 3, hidden dim 4 -- TODO confirm

    tokens = torch.tensor([[1, 2, 0], [3, 0, 0]])  # trailing 0s look like padding
    print(tokens.size())

    embedded = embedding(tokens)
    # LSTMd apparently returns a 3-tuple; third element is unused here.
    decoded, hidden, _ = decoder(embedded, None)
    # Normalize each position over the last (feature) dimension.
    probabilities = F.softmax(decoded, dim=2)
    print('complete')


def rnn_pack_test():
    """Demonstrate the sort/pack/pad/unsort round trip for padded sequences.

    pack_padded_sequence (without enforce_sorted=False) requires the batch
    sorted by decreasing length, so we sort, pack/unpack, then restore the
    original row order.

    Returns:
        LongTensor of shape (3, 3): the padded batch back in its original
        row order (equal to ``input_data`` here).
    """
    input_data = torch.tensor([[1, 2, 0], [1, 2, 3], [1, 0, 0]])
    lens = torch.tensor([2, 3, 1])
    _, idx_sort = torch.sort(lens, dim=0, descending=True)
    # BUG FIX: the inverse permutation is the *indices* returned by sorting
    # idx_sort, not the sorted values (the values are always 0..n-1, which
    # only coincidentally unsorted this particular example).
    _, idx_unsort = torch.sort(idx_sort, dim=0)
    x_packed = nn.utils.rnn.pack_padded_sequence(
        input_data[idx_sort], lens[idx_sort], batch_first=True)
    # pad_packed_sequence returns (padded_tensor, lengths).
    x_padded = nn.utils.rnn.pad_packed_sequence(x_packed, batch_first=True)
    output = x_padded[0][idx_unsort]
    print('complete')
    return output



def reverse_lstm():
    lstm = LSTM(3, 4)
    embedd_layer = nn.Embedding(5, 3)
    t = torch.tensor([[1, 0], [3, 0]])
    embed = embedd_layer(t)
    embed_r = embedd_layer(t)
    hx = None
    output, hidden = lstm(embed, hx)
    r_o, r_h = lstm(embed_r, hx)
    return output, hidden


# print(lstm._all_weights)
# print(lstm.all_weights)
# lstm.bias_ih_l0.data[20:40].fill_(0)
# print(lstm.all_weights)


def init_weights(lstm):
    """Re-initialize a module's parameters in place: zero all biases and
    apply Xavier-normal init to all weight matrices.

    Args:
        lstm: any nn.Module whose parameters follow the usual
            'weight_*' / 'bias_*' naming convention (e.g. nn.LSTM).
    """
    print('随机初始化')
    print(list(lstm.parameters()))
    for name, param in lstm.named_parameters():
        if 'bias' in name:
            # nn.init.constant / nn.init.xavier_normal are deprecated;
            # the trailing-underscore variants are the in-place API.
            nn.init.constant_(param, 0.0)
        elif 'weight' in name:
            nn.init.xavier_normal_(param)
    print('xavier初始化')
    print(list(lstm.parameters()))


class Encoder(nn.Module):
    """Thin wrapper around a single LSTM (input dim 3, hidden dim 4) whose
    parameters are immediately re-initialized via init_weights (xavier
    weights, zero biases)."""

    def __init__(self):
        super().__init__()
        self.lstm = LSTM(3, 4)
        # Mutates self.lstm's parameters in place right after construction.
        init_weights(self.lstm)

# model = Encoder()

if __name__ == '__main__':
    # Guarded so importing this module (e.g. from a test runner) does not
    # trigger the smoke test as a side effect.
    encoder_test()