'''
Created on Feb 28, 2011

@author: Chris
'''
import Document
import Section
import Sentence
class DocumentTokenizer(object):
    """
    Tokenizes an entire document (a plain string) into a
    Document -> Section -> Sentence -> word hierarchy to facilitate
    suggestion creation.
    """

    def __init__(self):
        """Constructor. The tokenizer keeps no state."""
        pass

    def tokenizeDocument(self, document):
        """
        Split *document* into sections (paragraphs) on newlines and build
        a Document object for further tokenizing and analysis.

        Tabs are stripped first; paragraphs that are empty or contain only
        whitespace are ignored (implements the previously commented-out
        empty-paragraph handling).
        """
        working_doc = document.replace("\t", "")
        doc = Document.Document()
        for section in working_doc.split("\n"):
            # Skip blank paragraphs instead of creating empty Sections.
            if not self.isEmpty(section):
                doc.addSection(self.tokenizeSection(section))
        return doc

    def tokenizeSection(self, sectionText):
        """
        Split *sectionText* into sentences on '.' and build a Section for
        further tokenizing and analysis.

        NOTE(review): a bare '.' split mishandles ellipses ('...') and
        abbreviations -- a regexp-based splitter is still a TODO.
        Whitespace-only fragments (e.g. after a trailing period) are
        skipped so they do not become empty Sentences.
        """
        section = Section.Section()
        for sentence in sectionText.split("."):
            if not self.isEmpty(sentence):
                section.addSentence(self.tokenizeSentence(sentence))
        return section

    def tokenizeSentence(self, sentenceText):
        """
        Split *sentenceText* into words on single spaces and build a
        Sentence for further tokenizing and analysis.

        Empty tokens produced by consecutive/leading/trailing spaces are
        dropped; fuller sanitization (punctuation stripping) is a TODO.
        """
        sentence = Sentence.Sentence()
        for word in sentenceText.split(" "):
            if word:  # drop empty tokens from runs of spaces
                #find syllable
                #find part of speech
                sentence.addWord(word)
        return sentence

    def isEmpty(self, string):
        """Return True when *string* is empty or whitespace-only."""
        return not string.strip()
    
# Demo driver: tokenize a sample two-paragraph document.  Guarded so that
# importing this module does not execute the demo as a side effect.
if __name__ == "__main__":
    doc = "Hello, my name is test. I really like to dance and eat butter.\n\t \n\t My other favorite activities include polka, cheese, and musical chairs. That is all."
    # Single-argument print(...) is valid in both Python 2 and Python 3.
    print(doc)
    dTokenizer = DocumentTokenizer()
    document = dTokenizer.tokenizeDocument(doc)