import numpy as np
from utils.config import ModelConfig
from model.Bert_LSTM import bert_lstm
import torch

# Fix all RNG seeds so embedding generation is reproducible across runs.
np.random.seed(0)
torch.manual_seed(0)

# Detect GPU availability once at import time; gen_emb() consults this flag.
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
    # CUDA has its own RNG stream; seed it separately for determinism on GPU.
    torch.cuda.manual_seed(0)

# Module-level model configuration.
# NOTE(review): `config` is unused in this chunk — presumably consumed by
# code elsewhere in the file; confirm before removing.
config = ModelConfig()
def gen_emb(inputs, net):
    """Run `net` in inference mode on `inputs` and return its latent hidden state.

    Args:
        inputs: token-id tensor whose first dimension is the batch size.
            (assumes shape (batch, seq_len) — TODO confirm against caller)
        net: model exposing `init_hidden(batch_size)` and returning a
            `(output, hidden)` pair when called as `net(inputs, h)`.

    Returns:
        The second element of the model's output tuple (the latent hidden state).
    """
    # Fresh hidden state sized to this batch.
    h = net.init_hidden(inputs.size(0))

    if USE_CUDA:
        inputs = inputs.cuda()

    # Inference only: disable dropout/batch-norm updates and gradient tracking.
    net.eval()
    with torch.no_grad():
        _, hidden = net(inputs, h)
    return hidden