from rai.tagger.chunker import *
from sets import Set

def trainBigramChunker():
    """Train a bigram chunker via ChunkerTrainer and persist it to the
    ChunkStore under the name "bigramChunker"."""
    name = "bigramChunker"
    chunker = ChunkerTrainer().getBigramChunker()
    ChunkStore().saveChunker(chunker, name)
    
#trainBigramChunker()

def printAllConll2000Tags():
    conll_tags = []
    for (word, tag) in nltk.corpus.conll2000.tagged_words('train.txt'):
        conll_tags.append(tag)
    
    conll_tags_set = Set(conll_tags)
    print len (conll_tags_set)
    for tag in conll_tags_set:
        print tag

def printAllBrownTags():
    brown_tags_set = Set()
    brownCategories = ['a','b','c','d','e','f','h','j','k','l','m','n','p','r' ]
    for (word, tag) in nltk.corpus.brown.tagged_words(categories=brownCategories):  
        brown_tags_set.add(tag)
    print len(brown_tags_set)
    for tag in brown_tags_set:
        print tag

def showBrownTags(input_word):
    tags = Set()
    for (word, tag) in nltk.corpus.brown.tagged_words(simplify_tags=False):
        if word == input_word:  
            tags.add(tag)
    print tags
    
#[('I', 'PRP', 'B-NP'), ('have', 'HV', None), ('been', 'BEN', None), ('waiting', 'VBG', 'B-VP')]
def showConll2000Tags(input_word):
    tags = Set()
    for (word, tag) in nltk.corpus.conll2000.tagged_words('train.txt'):
        if word == input_word:
            tags.add(tag)
    print tags

def showConll2000Words(input_tag):
    words = Set()
    for (word, tag) in nltk.corpus.conll2000.tagged_words('train.txt'):
        if tag == input_tag:
            words.add(word)
    print words

def showBrownWords(input_tag):
    words = Set()
    brownCategories = ['a','b','c','d','e','f','h','j','k','l','m','n','p','r' ]
    for (word, tag) in nltk.corpus.brown.tagged_words(categories=brownCategories,simplify_tags=False):  
        if tag == input_tag:
            words.add(word)
    print words

# --- Scratch area: one-off corpus queries kept for reference ---
#print "Stone'".partition("'")
# NOTE(review): record of tokens the chunker failed on during training —
# kept as a reminder of the tag combinations to investigate.
"""
Miss chunked Exception: [('United', 'VBN-TL', None)]
Miss chunked Exception: [('Fourth', 'OD-TL', None), ('nothing', 'PN', None)]
Miss chunked Exception: [("What's", 'WDT+BEZ', None), ("Do", 'DO', None)]
Miss chunked Exception: [("Let's", 'VB+PPO', None)]
"""
#showBrownWords("VB+PPO")
#showBrownTags("to")

#showConll2000Tags("to")
#showConll2000Tags("us")
# Live call: dumps every word tagged PRP in the CoNLL-2000 training set.
showConll2000Words("PRP")
#showConll2000Words("VBP")

def sent_contains_word(sent, word):
    """Return True if *word* occurs (compared with ==) in *sent*.

    The original `len(filter(...)) > 0` built a throwaway list and would
    fail on Python 3, where filter is lazy and has no len(); `any` short-
    circuits on the first match instead.
    """
    return any(token == word for token in sent)
def sent_contains_wordtag(sent, word, tag):
    """Return True if *sent* contains a token whose first element equals
    *word* and whose second equals *tag*.

    Tokens are indexed (tok[0], tok[1]) rather than unpacked, so tuples
    longer than two elements still match — same behavior as the original
    `len(filter(...)) > 0`, but without materializing a list (and it works
    under Python 3's lazy filter semantics).
    """
    return any(tok[0] == word and tok[1] == tag for tok in sent)
#print nltk.corpus.conll2000.sents('train.txt')[0]
#print nltk.corpus.conll2000.tagged_sents('train.txt')[0]
#print nltk.corpus.conll2000.chunked_sents('train.txt')[0]
word = "her"
tag = "PRP"
sents = []
for index, s in enumerate(nltk.corpus.conll2000.tagged_sents('train.txt')):
    if sent_contains_wordtag(s, word, tag):
        sents.append(nltk.corpus.conll2000.tagged_sents('train.txt')[index])
        sents.append(nltk.corpus.conll2000.chunked_sents('train.txt')[index])
        
for s in sents:
    print s