import torch
from configs import BATCH_SIZE, DATA_PATH, EMB_DIM, IN_HIDEN1, IN_HIDEN2, IN_LSTM, LSTM_LAYER, OUT_LSTM, OUT_HIDEN1, OUT_HIDEN2, DEVICE, LEARNING_RATE, EPOCHES, MAX_GEN_LEN, EPOCHES
from dataloader import make_dataloader, get_data
from model import RNN 


def generate(model, start_words, ix2word, word2ix, prefix_words=None):
    """Generate a poem that begins with ``start_words``.

    Args:
        model: trained language model; called as ``model(input, hidden)`` and
            expected to return ``(output, hidden)``.
        start_words: string whose characters are forced as the opening of the
            generated sequence (teacher forcing).
        ix2word: mapping from token index to character.
        word2ix: mapping from character to token index.
        prefix_words: optional string used only to prime the hidden state
            (sets the "mood"); its characters do not appear in the output.

    Returns:
        list[str]: the generated characters, without the '<EOP>' marker.
    """
    results = list(start_words)
    start_word_len = len(start_words)
    # Seed generation with the <START> token.  Note: avoid shadowing the
    # builtin `input`; use torch.tensor (modern API) instead of torch.Tensor.
    input_ = torch.tensor([word2ix['<START>']]).view(1, 1).long()
    input_ = input_.to(DEVICE)
    hidden = None

    # Pure inference: disable autograd so no graph is built.
    with torch.no_grad():
        if prefix_words:
            # Feed the prefix only to warm up the hidden state; the model
            # outputs for these steps are intentionally discarded.
            for word in prefix_words:
                output, hidden = model(input_, hidden)
                input_ = input_.data.new([word2ix[word]]).view(1, 1)

        for i in range(MAX_GEN_LEN):
            output, hidden = model(input_, hidden)

            if i < start_word_len:
                # Still inside the forced opening: feed the given character.
                w = results[i]
                input_ = input_.data.new([word2ix[w]]).view(1, 1)
            else:
                # Greedy decoding: take the highest-scoring next token.
                top_index = output.data[0].topk(1)[1][0].item()
                w = ix2word[top_index]
                results.append(w)
                input_ = input_.data.new([top_index]).view(1, 1)
            if w == '<EOP>':
                # End-of-poem marker: drop it from the output and stop.
                del results[-1]
                break
    return results

# ---- Inference entry point -------------------------------------------------
# Build the vocabulary, construct the model, restore trained weights, and
# generate a poem from a fixed opening line.
_, word2ix, ix2word = get_data(data_path=DATA_PATH)
model = RNN(
    vocab_size=len(word2ix),
    embedding_dim=EMB_DIM,
    input_hidden1=IN_HIDEN1,
    input_hidden2=IN_HIDEN2,
    lstm_input=IN_LSTM,
    lstm_layers=LSTM_LAYER,
    lstm_output=OUT_LSTM,
    output_hidden1=OUT_HIDEN1,
    output_hidden2=OUT_HIDEN2,
).to(DEVICE)
# Restore the trained weights first, THEN switch to eval mode so dropout /
# batch-norm layers behave deterministically during generation.
model.load_state_dict(
    torch.load(f'./ckpt/RNN_{EPOCHES}.pth', map_location=torch.device(DEVICE))
)
model.eval()
result = generate(model=model, start_words="春江潮水连海平", ix2word=ix2word, word2ix=word2ix)

print(result)