import torch
from torch import nn
from torch.nn import functional as F
import dltools

# Train a character-level LSTM language model on the Time Machine dataset
# (Dive-into-Deep-Learning ch. 9 style), using the project's dltools helpers.

# Data: mini-batches of `num_steps`-character subsequences.
batch_size = 32
num_steps = 35
train_iter, vocab = dltools.load_data_time_machine(batch_size=batch_size, num_steps=num_steps)

# Model: a single-layer LSTM over one-hot vocab inputs, wrapped by the
# project's RNNModel (which adds the output/decoding layer).
num_hiddens = 256
lstm_layer = nn.LSTM(len(vocab), num_hiddens)
model = dltools.RNNModel(lstm_layer, len(vocab))

# Resolve the device once so the model and the training loop agree on it.
device = dltools.try_gpu()
model = model.to(device)

# Hyperparameters: epochs and learning rate for the ch8-style trainer.
num_epochs, lr = 1000, 0.8
dltools.train_ch8(model, train_iter, vocab, lr, num_epochs, device)