from rai.tagger.tagger import *
from rai.tagger.chunker import *
from rai.logger.logger import *
from nltk.tokenize import *

class SentenceParser:
    """Parses a sentence by POS-tagging it and grouping the tags into chunks.

    On construction, loads a pre-trained tagger ("brown_2") from the
    TagStore and a pre-trained chunker ("bigramChunker") from the
    ChunkStore.
    """

    def __init__(self):
        # Load the pre-trained POS tagger.
        log("LOADING TAGGER")
        tagStore = TagStore()
        self.tagger = tagStore.loadTagger("brown_2")
        log("TAGGER LOADED")

        # Load the pre-trained chunker.
        log("LOADING CHUNKER")
        chunkStore = ChunkStore()
        self.chunker = chunkStore.loadChunker("bigramChunker")
        log("CHUNKER LOADED")

    def __tagSentence(self, sentence):
        """POS-tag a sentence string; returns a list of (word, tag) pairs.

        Tokenization is a plain whitespace split, so punctuation stays
        attached to words. (A WordPunctTokenizer was tried and disabled;
        callers are expected to pre-space punctuation if needed.)
        """
        words = sentence.split()  # just split on spaces
        return self.tagger.tag(words)

    def __chunkTokens(self, tokens):
        """Chunk a list of POS tags; returns a list of (tag, chunk) pairs."""
        return list(self.chunker.tag(tokens))

    def getSentence(self, parsed_sentence):
        """Reassemble the plain-text sentence from a parsed sentence.

        @param parsed_sentence: list of chunks, each (label, part, ...);
            a part's first element is the word.
        @return: the words joined by single spaces (string)
        """
        words = []
        for chunk in parsed_sentence:
            for part in chunk[1:]:
                words.append(part[0])
        return " ".join(words)

    def formatParsedSentence(self, parsed_sentence):
        """Pretty-print a parsed sentence for display.

        First line is the reassembled sentence text, followed by one
        tab-indented line per chunk label and one double-tab-indented
        line per part within the chunk.
        """
        result = [self.getSentence(parsed_sentence)]
        for chunk in parsed_sentence:
            result.append("\t" + chunk[0])
            for part in chunk[1:]:
                result.append("\t\t" + str(part))
        return "\n".join(result)

    def parseSentence(self, sentence, collapse_PP=True):
        """ Parses a sentence:
            1. First tag the sentence with POS-tags
            2. Groups the tags into larger chunks
            3. format the result
            @param sentence: the sentence that should be parsed (string)
            @param collapse_PP: if true, collapse PP and the next NP part to a single PP
            @raise Exception: if the chunker leaves any token unchunked (None chunk)
        """
        # Step 1, tag the words
        tagged_words = self.__tagSentence(sentence)  # [(word, tag), (word, tag), ...]
        debug("tagged:" + str(tagged_words))
        # Convert the Brown tags to Connll2000 tags; each entry becomes
        # a (word, conll_tag, brown_tag) triple.
        tagged_words = convertBrownTagListToConnll2000(tagged_words)
        debug("tags converted:" + str(tagged_words))
        tag_tokens = [t for w, t, b_tag in tagged_words]  # list of Connll tags
        words = [w for w, t, b_tag in tagged_words]       # list of words

        # Step 2, chunk the tag sequence
        chunks = self.__chunkTokens(tag_tokens)  # [(tag, chunk), (tag, chunk), ...]
        org_tag_tokens = [b_tag for w, t, b_tag in tagged_words]  # original Brown tags
        chunks_and_words = [(w, org_t, t, c)
                            for w, org_t, (t, c) in zip(words, org_tag_tokens, chunks)]

        debug("C&W:" + str(chunks_and_words))
        # Fail loudly on any token the chunker could not assign a chunk to.
        # NOTE: a list comprehension (instead of filter() with a
        # tuple-unpacking lambda) works on both Python 2 and Python 3;
        # the original lambda form is a SyntaxError on Python 3, and
        # len(filter(...)) fails there because filter() is lazy.
        miss_chunked = [cw for cw in chunks_and_words if cw[3] is None]
        if miss_chunked:
            raise Exception("Miss chunked Exception: " + str(miss_chunked))

        # Step 3, flatten the chunk tree and optionally collapse PP + NP
        # pairs into a single PP part.
        flat_list = flattree(chunks_and_words)
        if collapse_PP:
            return collapseParts(flat_list)
        return flat_list
    
def convertNPPartToEntity(npPart):
    """Return the entity for an NP part: its last element, skipping the
    leading chunk label at index 0. Original case is kept (no lowering).

    Note: raises IndexError when the part holds only a label and no words.
    """
    words = npPart[1:]  # drop the chunk label
    return words[-1]
    
