'''
Created on Feb 28, 2011

@author: Chris
'''
import Document
import Section
import Sentence
import RegExpHandler
class DocumentTokenizer(object):
    '''
    Provides a way to tokenize an entire document into sections, sentences,
    and words to facilitate suggestion creation.
    '''

    def __init__(self):
        '''
        Constructor: creates the regular-expression helper used by all
        tokenize* methods.
        '''
        self.regExp = RegExpHandler.RegExpHandler()

    def tokenizeDocument(self, document):
        """
        Take a raw document string, tokenize it into sections (paragraphs)
        and return a populated Document object.

        Sections are delimited by newlines; tabs are removed and carriage
        returns are stripped first so Windows and Linux line endings are
        handled uniformly. Sections that contain no words are skipped.
        """
        # Deal with carriage returns of linux/windows and remove tabs.
        working_doc = document.replace("\t", "").replace("\r", "")

        doc = Document.Document()
        for sectionText in self.regExp.splitStringRegExp(working_doc, "\n"):
            newSection = self.tokenizeSection(sectionText)
            # Ignore blank / whitespace-only sections.
            if newSection.wordCount > 0:
                doc.addSection(newSection)

        return doc

    def tokenizeSection(self, sectionText):
        """
        Take one section from the document, tokenize it into sentences and
        return a populated Section object.

        The split pattern uses a capture group so the terminal punctuation
        survives the split: parts alternate
        [text, punct, text, punct, ..., trailing-text].
        """
        section = Section.Section()
        # Capture group keeps '.', '?' or '!' so it can be re-attached below.
        parts = self.regExp.splitStringRegExp(sectionText, r"([.?!])")

        # Re-join each text fragment with the punctuation mark that followed
        # it.  re.split-style output with one capture group has odd length,
        # so pairs live at indices (0,1), (2,3), ...
        sentences = [parts[i] + parts[i + 1]
                     for i in range(0, len(parts) - 1, 2)]

        # Bug fix: text after the final punctuation mark (or a section with
        # no terminal punctuation at all) used to be silently dropped.
        if parts and parts[-1].strip():
            sentences.append(parts[-1])

        for sentenceText in sentences:
            newSentence = self.tokenizeSentence(sentenceText)
            section.addSentence(newSentence)
            section.wordCount += newSentence.wordCount

        return section

    def tokenizeSentence(self, sentenceText):
        """
        Take one sentence from the document, tokenize it into words and
        return a populated Sentence object.

        Terminal punctuation ('.', '?' or '!') is stored on the Sentence
        separately from its text.
        """
        sentence = Sentence.Sentence()

        sentenceText = sentenceText.strip()
        # Bug fix: guard against the empty string -- indexing [-1] on ""
        # raised IndexError.
        if sentenceText and sentenceText[-1] in ".?!":
            sentence.punctuation = sentenceText[-1]
            sentenceText = sentenceText[:-1]

        sentence.text = sentenceText

        # r"\s" alone splits on any whitespace; the ";\s", ",\s" and ":\s"
        # alternatives additionally consume a trailing ';', ',' or ':' from
        # the preceding word when it is followed by whitespace.
        words = self.regExp.splitStringRegExp(sentenceText, r"\s|;\s|,\s|:\s")
        for word in words:
            # TODO: find syllable count for each word.
            # TODO: find part of speech for each word.
            sentence.addWord(word)

        return sentence

    def isEmpty(self, string):
        """
        Return True when *string* is empty or whitespace-only.

        Bug fix: this was a stub that always returned False; implemented to
        match its name (NOTE(review): no caller is visible in this file --
        confirm no caller relies on the old constant-False behavior).
        """
        return not string.strip()

if __name__ == '__main__':
    # Smoke test: tokenize a small multi-section sample into a Document.
    sample = "Hello, my name is test. I really like to dance and eat butter!\n\t \n\t My other favorite activities; include polka, cheese, and musical chairs. That is all."

    tokenizer = DocumentTokenizer()
    result = tokenizer.tokenizeDocument(sample)