import pickle
import torch
from torch import nn, optim
from torch.nn import LSTM, modules, Embedding
from config import *
from pre_process import get_embedding_matrix
from some_tests.seq_batch import DynamicEncoder, EncoderRNN
import numpy as np

# Smoke test: load the vocabulary / embeddings / pickled test data and push
# one small batch through EncoderRNN to check the forward pass runs.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BASE_DIR = './data/'

# Use context managers so the pickle file handles are closed
# (the original bare `pickle.load(open(...))` calls leaked them).
with open(BASE_DIR + 'word_index.pkl', 'rb') as f:
    word2ix = pickle.load(f)
embedding_matrix = get_embedding_matrix(word_index=word2ix)

# +1 presumably reserves an extra index for padding/OOV — TODO confirm
# against how word_index.pkl / get_embedding_matrix assign indices.
encoder = EncoderRNN(WORD_DICT_SIZE + 1, EMBEDDING_DIM, ENCODER_DIM)

with open(BASE_DIR + TEST_DATA, 'rb') as f:
    test_data = pickle.load(f)

# Optimizer is created but never stepped here — kept only so the smoke
# test also exercises parameter registration.
encoder_optimizer = optim.SGD(encoder.parameters(), lr=0.01)

# Take a small batch of 20 sequences. Compute the per-sequence lengths
# from the raw Python lists BEFORE tensor conversion: after
# torch.tensor() every row has the same (padded) length, so measuring
# lengths on the tensor rows — as the original code did — is meaningless,
# and on ragged input torch.tensor() would raise before lengths existed.
raw_batch = test_data[0][:20]
input_lens = np.asarray([len(seq) for seq in raw_batch])
batch_data = torch.tensor(raw_batch)
print(input_lens)

output, hidden = encoder(batch_data, torch.tensor(input_lens))

print('completed')