import nltk, re, pprint
from nltk.sem import relextract
from string import join

def keywords(text):
    """Extract keyword candidates from *text*.

    The text is stripped of double-quote characters, sentence-split,
    tokenized, and POS-tagged via ``input_preprocess``; every token tagged
    as a singular noun (``NN``) or singular proper noun (``NNP``) is
    returned lowercased.

    :param text: raw input string
    :return: list of lowercased noun tokens (may contain duplicates)
    """
    # str.replace keeps `doc` a plain str on both Python 2 and 3;
    # the original filter() returns a lazy iterator on Python 3, which
    # would break the downstream tokenizer.
    doc = text.replace('"', '')
    sentences = input_preprocess(doc)
    taggedWords = []
    for s in sentences:
        taggedWords.extend(s)

    # The original `lambda (w, t): ...` tuple-parameter syntax is a
    # SyntaxError on Python 3; a comprehension unpacks the pair instead.
    return [w.lower() for (w, t) in taggedWords if t in ('NN', 'NNP')]

def input_preprocess(doc):
    """Sentence-split, tokenize, and POS-tag *doc*.

    :param doc: raw text string
    :return: list of sentences, each a list of ``(token, pos_tag)`` pairs
    """
    tagged_sentences = []
    # One pass per sentence instead of rebuilding the whole list three times.
    for sentence in nltk.sent_tokenize(doc):
        tokens = nltk.word_tokenize(sentence)
        tagged_sentences.append(nltk.pos_tag(tokens))
    return tagged_sentences

def ner(doc):
    """Run NLTK named-entity chunking over *doc*.

    :param doc: raw text string
    :return: list of ``nltk.ne_chunk`` trees, one per sentence
    """
    # Chunk each POS-tagged sentence directly; no intermediate binding is
    # kept alive past the return (the original's `del pos` was a no-op).
    return [nltk.ne_chunk(tagged) for tagged in input_preprocess(doc)]

    

