import torchtext
from torchtext.vocab import Vectors
import torch
import numpy as np
import random
import os

def fix_seed(seed: int = 3407) -> None:
    """Seed every RNG this script touches so runs are reproducible.

    Covers Python's `random`, hash randomization, NumPy, and PyTorch
    (CPU and all CUDA devices), and forces deterministic cuDNN kernels.

    Args:
        seed: The seed applied to all RNGs (default 3407).
    """
    random.seed(seed)
    # Must be set so str/bytes hashing is stable across runs.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Fix: seed every CUDA device, not just the current one (no-op without CUDA).
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN algorithms; may be slower but is reproducible.
    torch.backends.cudnn.deterministic = True


fix_seed()

# --- Hyperparameters -------------------------------------------------------
path = '../../data'          # NOTE(review): unused below (splits hard-codes its own path) — kept for compatibility
BATCH_SIZE = 32
EMBEDDING_SIZE = 650
# Fix: removed a dead `MAX_VOCAB_SIZE = 50000` assignment that was immediately
# overwritten; 550000 is the value that actually took effect.
MAX_VOCAB_SIZE = 550000

# --- Data pipeline ---------------------------------------------------------
# Field that lowercases every token; whitespace tokenization by default.
TEXT = torchtext.data.Field(lower=True)

# Load the text8 language-modeling splits from disk.
train, val, test = torchtext.datasets.LanguageModelingDataset.splits(
    path='../../data/text8',
    train="text8.train.txt", validation="text8.dev.txt",
    test="text8.test.txt", text_field=TEXT)

# Build the vocabulary from the training split, capped at MAX_VOCAB_SIZE
# (torchtext adds <unk>/<pad> specials on top of that cap).
TEXT.build_vocab(train, max_size=MAX_VOCAB_SIZE)
print("vocabulary size: {}".format(len(TEXT.vocab)))

VOCAB_SIZE = len(TEXT.vocab)

# BPTT iterators yield (bptt_len, batch_size) token-id tensors plus shifted
# targets for language modeling.
train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(
    (train, val, test), batch_size=BATCH_SIZE, device='cpu', bptt_len=50,
    repeat=False, shuffle=True)
