import nltk
from nltk.corpus import wordnet
from listize import build_my_words

def wordnet_pos(tag):
    """Map a Penn Treebank POS tag to the matching WordNet word class.

    Any tag that does not start with J/V/N/R falls back to NOUN, which
    is also WordNetLemmatizer's own default part of speech.
    """
    by_prefix = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    # tag[:1] is the first character (or '' for an empty tag, which
    # safely hits the NOUN default), matching the startswith() checks.
    return by_prefix.get(tag[:1], wordnet.NOUN)

def parse(sentence):
    """Lemmatize *sentence* and annotate every word whose lemma differs.

    Each token is POS-tagged so the WordNet lemmatizer uses the right
    word class. Words whose lemma differs from the surface form (and
    whose lemma is not an English stopword) are rendered as
    ``word(lemma)``; all other tokens are emitted unchanged. Alphabetic
    tokens are preceded by a space (punctuation stays attached to the
    previous token).

    Relies on module-level ``stemmer`` (WordNetLemmatizer) and
    ``stop_words``.

    :param sentence: raw sentence string
    :return: annotated sentence string
    """
    tokens = nltk.word_tokenize(sentence)
    # Tag once, then pair each token with its lemma via zip instead of
    # indexing a parallel list.
    tagged = nltk.pos_tag(tokens)
    roots = [(word, stemmer.lemmatize(word, wordnet_pos(tag)))
             for word, tag in tagged]

    # Accumulate pieces in a list and join once — avoids the quadratic
    # cost of repeated string concatenation (and no longer shadows the
    # builtin `str`).
    parts = []
    for word, lemma in roots:
        if word.isalpha() and parts:
            parts.append(' ')
        if word != lemma and lemma not in stop_words:
            parts.append(f'{word}({lemma})')
        else:
            parts.append(word)

    return ''.join(parts)


# my_words = build_my_words()
# English stopwords consulted by parse() to skip uninteresting lemmas.
stop_words = set(nltk.corpus.stopwords.words('english'))
# NOTE: despite the name, this is a lemmatizer, not a stemmer.
stemmer = nltk.stem.WordNetLemmatizer()


# Sample passage (presumably from "Anne of Green Gables" — see the
# disabled batch run below) used as a quick smoke test.
text = """ "We're all pretty well," said Mrs. Rachel. "I was kind of afraid YOU weren't, though, when I saw Matthew starting off today. I thought maybe he was going to the doctor's."
 """
# tokens = nltk.word_tokenize(text)
# Split the passage into sentences, then tokenize each sentence separately.
sens = nltk.sent_tokenize(text)

tokens = [nltk.word_tokenize(sen) for sen in sens]

print(tokens)



""" filename = 'Anne_Of_Green_Gables.txt'
# filename = 'anne.txt'
with open(filename, 'r') as f:
    for line in f.readlines()[:100]:
        line = line.strip()
        if line:
            fline = parse(line)
            print('\033[031m{}\033[0m'.format(line))
            print(fline)

 """