import torch
from torch import nn
import numpy as np



class LSTMModel(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Feeds the input sequence through a stacked bidirectional LSTM, projects the
    concatenated forward/backward hidden states to `output_num` classes, and
    returns per-frame class probabilities.

    Args:
        input_num: size of each input feature vector.
        hidden_num: hidden size of the LSTM (per direction).
        gru_layers: number of stacked LSTM layers. (Name is a historical
            misnomer — this model uses an LSTM, not a GRU; kept for
            backward compatibility with existing callers.)
        output_num: number of output classes.
    """

    def __init__(self, input_num, hidden_num, gru_layers, output_num):
        super().__init__()
        self.hidden_num = hidden_num
        self.LSTM_layer = nn.LSTM(
            input_size=input_num,
            hidden_size=hidden_num,
            num_layers=gru_layers,
            batch_first=True,
            bidirectional=True,
        )
        # Bidirectional: forward and backward states are concatenated,
        # so the projection sees 2 * hidden_num features.
        # NOTE(review): ReLU immediately before Softmax is unusual (it zeroes
        # negative logits before normalization) — kept as-is to stay faithful
        # to the trained checkpoint this model is loaded from.
        self.output_layer = nn.Sequential(
            nn.Linear(hidden_num * 2, output_num),
            nn.ReLU(inplace=True),
        )
        self.softmax_func = nn.Softmax(dim=2)

    def forward(self, x):
        """Return per-frame class probabilities.

        Args:
            x: tensor of shape (batch, time, input_num).

        Returns:
            Tensor of shape (batch, time, output_num); each frame's
            probabilities sum to 1 along the last dimension.
        """
        # Compact the LSTM weights into one contiguous chunk (avoids a
        # warning / extra copies, relevant after loading or on multi-GPU).
        self.LSTM_layer.flatten_parameters()
        x, _ = self.LSTM_layer(x)
        x = self.output_layer(x)
        x = self.softmax_func(x)
        return x

def inference(x, model_path="/home/changhengyi/gru-pytorch/blstmx2/model.pt"):
    """Run the pretrained BLSTM classifier on a single feature sequence.

    Args:
        x: 2-D array-like feature matrix; assumed shape (time, 80) to match
            the model's input size — TODO confirm against the feature file.
        model_path: path to the saved state dict. Defaults to the original
            hard-coded location for backward compatibility.

    Returns:
        numpy array of shape (1, time, 40) with per-frame class probabilities.
    """
    net = LSTMModel(80, 512, 2, 40)
    net_dict = net.state_dict()
    # map_location="cpu": a checkpoint saved on GPU would otherwise fail to
    # load on a CPU-only host.
    pretrained_dict = torch.load(model_path, map_location="cpu")
    # Saved keys carry an extra leading prefix (presumably a wrapper such as
    # "module." from DataParallel — verify against the training script);
    # strip the first dotted component so keys match this model's names.
    pretrained_dict = {k.split(".", 1)[1]: v for k, v in pretrained_dict.items()}
    net_dict.update(pretrained_dict)
    net.load_state_dict(net_dict)
    net.eval()

    # Add the batch dimension expected by batch_first=True.
    sample = torch.as_tensor(np.asarray(x), dtype=torch.float32).unsqueeze(0)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        out = net(sample)
    return out.numpy()


if __name__ == "__main__":
    # Load the per-frame feature matrix produced by the extraction step.
    feats = np.loadtxt("/home/changhengyi/gru-pytorch/feat.txt")
    print(feats.shape)
    # Run the pretrained model; result has a leading batch dimension.
    probs = inference(feats)
    print(probs)
    print(probs.shape)
    # Drop the batch dimension and dump frame-by-frame probabilities.
    np.savetxt("output.txt", probs[0], fmt="%10.5f")