"""Tokenize a text corpus into per-sentence word lists and print generated text.

Reads ``Tarrent.txt``, splits it into sentences with NLTK's Punkt model,
word-tokenizes each sentence, and passes the result to the project-local
``Generator`` (see ``sentence_generator``), printing one generated output.
"""
import nltk
import nltk.data
from nltk import word_tokenize

from sentence_generator import Generator


def main():
    """Build sentence token lists from the corpus file and print generated text."""
    # Ensure the Punkt sentence-tokenizer models are present locally.
    nltk.download('punkt')

    with open("Tarrent.txt", encoding='utf-8', mode="r") as f:
        sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
        sents = sent_detector.tokenize(f.read().strip())
        # Collapse intra-sentence newlines so word_tokenize sees one clean line
        # per sentence.
        sent_tokens = [word_tokenize(sent.replace('\n', ' ')) for sent in sents]

    # NOTE(review): 80 is presumably a length/size parameter for Generator —
    # confirm against sentence_generator's API.
    generator = Generator(sent_tokens, 80)
    print(generator.generate())


if __name__ == "__main__":
    main()