"""Train a stacked LSTM character-level language model on the Time Machine corpus.

Pipeline: load minibatched corpus data, wrap an ``nn.LSTM`` in the project's
RNN model class, move it to the best available device, and run the project's
training loop. Hyperparameters follow the d2l RNN/LSTM recipe.
"""
import torch
from torch import nn
from d2l import torch as d2l

import RNN_model as rm
import RNN_train as rt
import text_preprocess as tp

# --- Data ------------------------------------------------------------------
# Each minibatch is (batch_size, num_steps) token indices from the corpus.
batch_size = 32
num_steps = 35
train_iter, vocab = tp.load_data_time_machine(batch_size, num_steps)

# --- Model -----------------------------------------------------------------
# Inputs are one-hot vectors of size len(vocab), hence num_inputs == vocab_size.
vocab_size = len(vocab)
num_hiddens = 256
num_layers = 2  # stacked LSTM: two recurrent layers
num_inputs = vocab_size
device = d2l.try_gpu()  # falls back to CPU when no GPU is available
lstm_layer = nn.LSTM(num_inputs, num_hiddens, num_layers)
model = rm.RNN_Model(lstm_layer, vocab_size).to(device)

# --- Training --------------------------------------------------------------
# NOTE(review): lr=2 is large but typical for this d2l recipe — presumably
# rt.train applies gradient clipping; verify before changing the optimizer.
num_epochs, lr = 500, 2
rt.train(model, train_iter, vocab, lr, num_epochs, device)
