import numpy as np
from torch import nn
import torch
from model.Bert_LSTM import bert_lstm
from utils.config import ModelConfig
# Fix RNG seeds so numpy and torch produce reproducible results across runs.
np.random.seed(0)
torch.manual_seed(0)
# True when a CUDA GPU is available; bert_lstm_predict below uses this flag
# to decide whether to move inputs to the GPU.
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
    # Also seed the CUDA RNG for reproducibility on GPU runs.
    torch.cuda.manual_seed(0)
# Shared model/hyperparameter configuration (paths, dims) read by bert_lstm_predict.
config = ModelConfig()
def bert_lstm_predict(inputs):
    """Run inference with the BERT-LSTM classifier on a batch of token ids.

    Builds the model from the module-level ``config``, loads the saved
    weights, and evaluates the batch with gradients disabled.

    Args:
        inputs: LongTensor of token ids, shape (batch_size, seq_len) —
            assumed already tokenized and padded by the caller; TODO confirm.

    Returns:
        Tuple ``(output, pred)`` where ``output`` holds the softmax class
        probabilities, shape (batch_size, output_size), and ``pred`` holds
        the argmax class index for each sample, shape (batch_size,).
    """
    net = bert_lstm(config.bert_path,
                    config.hidden_dim,
                    config.output_size,
                    config.n_layers,
                    config.bidirectional)
    # map_location lets a checkpoint saved on a CUDA machine load on a
    # CPU-only one; the original unconditional net.cuda() crashed without a GPU.
    device = torch.device('cuda' if USE_CUDA else 'cpu')
    net.load_state_dict(torch.load(config.save_path, map_location=device))
    net.to(device)

    batch_size = inputs.size(0)
    # Fresh hidden state sized for this batch.
    h = net.init_hidden(batch_size)

    if USE_CUDA:
        inputs = inputs.cuda()

    net.eval()
    with torch.no_grad():
        output, _ = net(inputs, h)
        # Convert logits to class probabilities, then take the most likely class.
        output = torch.softmax(output, dim=1)
        pred = torch.argmax(output, dim=1)
        return output, pred