from rai.tagger.chunker import *
from rai.tagger.tagger import *
import nltk, re, pprint

def testChunker():
    print "TEST CHUNKER"
    sentence = "The quick brown fox jumps over the lazy dog"
    chunkStore = ChunkStore()
    bigramChunker = chunkStore.loadChunker("bigramChunker")
    print "CHUNKER LOADED"
    chunk_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(chtree)]
                  for chtree in nltk.corpus.conll2000.chunked_sents('train.txt')]
    tokens = [t for t,c in chunk_data[0]]
    ch = [c for t,c in chunk_data[0]]
    print tokens
    print ch
    print list(bigramChunker.tag(tokens))
    print "END TEST CHUNKER"
    
def testTagger():
    print "TEST TAGGER"
    sentence = "The quick brown fox jumps over the lazy dog"
    tagStore = TagStore()
    tagger = tagStore.loadTagger("brown_a")
    print "TAGGER LOADED"
    words = sentence.split()
    #print tagger.tag(words)
    sentence2 = "Anna Mae Bullock, better known by her stage name Tina Turner is an American singer, dancer and entertainer"
    words = sentence2.split()
    tagged_words = tagger.tag(words)
    #print tagger_words
    print "END TEST TAGGER"
    

    
def testChunkTags(tokens):
    print "LOADING CHUNKER"
    chunkStore = ChunkStore()
    bigramChunker = chunkStore.loadChunker("bigramChunker")
    print "CHUNKER LOADED"
    chunked = list(bigramChunker.tag(tokens))
    return chunked

def testTagSentence(sentence):
    print "LOADING TAGGER"
    tagStore = TagStore()
    #tagger = tagStore.loadTagger("brown_a")
    tagger = tagStore.loadTagger("brown_2")
    print "TAGGER LOADED"
    words = sentence.split()
    tagged_words = tagger.tag(words)
    return tagged_words
    
    
def testSimpleTaggerAndSimpleChunker():
    print "START testSimpleTaggerAndSimpleChunker"
    #sentence = "John is a boy"    
    #sentence = "John and Jack are boys"
    #sentence = "We saw the little yellow dog"
    #sentence = "He accepted the position"
    #sentence = "the dog walked in the house"
    #sentence = "Over a cup of coffee, Mr. Stone told his story."
    #sentence = "A dog told his story at his little home in the morning"
    #sentence = "I have been waiting"
    #sentence = "Have have you been waiting for me?" # not correct
    sentence = "The girl saw that the man said something to the boy that went to the elephant" # example sentence for in the report
    tagged_words = testTagSentence(sentence) # returns: [(word, tag), (word, tag), ...]
    print "TAGGED WORDS:"
    print tagged_words
    # convert Brown tags to connll2000 tags
    tagged_words = convertBrownTagListToConnll2000(tagged_words)
    print tagged_words
    tag_tokens = [c_tag for (w, c_tag, b_tag) in tagged_words]
    words = [w for (w, c_tag, b_tag) in tagged_words]
    print "TAGGED TOKENS"
    print tag_tokens
    chunks = testChunkTags(tag_tokens) # [(tag, chunk), (tag, chunk),...]
    org_tag_tokens = [b_tag for w,t,b_tag in tagged_words] # list of tags
    #chunks_and_words = [(w, t, c) for w, (t, c) in zip(words, chunks)]
    chunks_and_words = [(w, org_t, t, c) for w, org_t, (t, c) in zip(words, org_tag_tokens, chunks)]
    print chunks_and_words
    print "CHUNKS"
    print chunks
    chunk_str = "" # convert to conllstr, so we can feed it to the ChunkScore
    print "flat"
    print flattree(chunks_and_words)
    print "rrr"
    for w, b_tag, c_tag, chunk in chunks_and_words:
        if chunk == None:
            brown_tuple = (w, b_tag)
            convertBrownTagToConnll2000Tag(brown_tuple)
            chunk = 'I-NP'
        chunk_str = chunk_str + str(w) + " " + str(b_tag) + " " + str(chunk) + "\n"
    print "d" + chunk_str
    for i in nltk.chunk.conllstr2tree(chunk_str).subtrees():
        print i


    print "END testSimpleTaggerAndSimpleChunker"


def testWithCorpus():
    cp = nltk.RegexpParser(r"""
            NP: {<DT>?<JJ>*<NN.*>+}    # noun phrase chunks
            VP: {<TO>?<VB.*>}          # verb phrase chunks
            PP: {<IN>}                 # prepositional phrase chunks
            """)

    chunkStore = ChunkStore()
    bigramChunker = chunkStore.loadChunker("bigramChunker")
    print "LOADED REGEX AND BIGRAM"
    
    regexChunkscore = nltk.chunk.ChunkScore()
    bigramChunkscore = nltk.chunk.ChunkScore()
    for file in nltk.corpus.treebank_chunk.files()[:10]:
        #for chtree in nltk.corpus.conll2000.chunked_sents('train.txt')]
        for chunk_struct in nltk.corpus.treebank_chunk.chunked_sents(file)[:10]:
            # for each sentence
            wordlist = [word for word, tag, chtag in nltk.chunk.tree2conlltags(chunk_struct)]
            taglist = [tag for word, tag, chtag in nltk.chunk.tree2conlltags(chunk_struct)]
            chunklist = [chtag for word, tag, chtag in nltk.chunk.tree2conlltags(chunk_struct)]

            print "REGEX"
            test_sent = cp.parse(chunk_struct.flatten()) # 
            #print nltk.chunk.tree2conlltags(test_sent)
            #print nltk.chunk.tree2conlltags(chunk_struct)
            regexChunkscore.score(chunk_struct, test_sent) # regex parser score
            #print chunkscore
            #print test_sent
            
            print "BiGram"
            chunks = list(bigramChunker.tag(taglist))
            #chunks =  testChunkTags(taglist) # [(tag, chunk), (tag, chunk),...]
            
            # bigram: chunks and words
            chunks_and_words = [(w, t, c) for w, (t, c) in zip(wordlist, chunks)] # bigram
            
            chunk_str = "" # convert to conllstr, so we can feed it to the ChunkScore
            for w, (t, c) in zip(wordlist, chunks):
                chunk_str = chunk_str + w + " " + t + " " + c + "\n"

            bigramChunkscore.score(chunk_struct, nltk.chunk.conllstr2tree(chunk_str))
            
            # compare BIGRAM AND SOURCE
            #for left, right in zip(chunks_and_words, nltk.chunk.tree2conlltags(chunk_struct)):
            #    print str(left) + "\t" + str(right)
        # end for sentence in file
    # end file
    print "Regex"
    print regexChunkscore
    print "Bigram"
    print bigramChunkscore
  
  
def testCorpusDifStruct():
    print "COMPARE treebank AND conll2000"
    # treebank
    for file in nltk.corpus.treebank_chunk.files()[:1]:
        for chunk_struct in nltk.corpus.treebank_chunk.chunked_sents(file)[:1]:
            print chunk_struct
    for chtree in nltk.corpus.conll2000.chunked_sents('train.txt')[:1]:
        print chtree

# Script entry point: runs the end-to-end tagger+chunker test on import/run.
# The other drivers can be enabled by uncommenting the calls below.
testSimpleTaggerAndSimpleChunker()
#s = [['Have'], ['NP', 'you', 'been'], ['PP', 'blah'], ['VP', 'waiting'], ['PP', 'for'], ['NP', 'me?'], ['PP', 'always'], ['PP', 'in'],['NP', 'here']]
#print collapseParts(s)
#testWithCorpus()
#testCorpusDifStruct()
