#coding: utf-8
from nltk.tokenize import *
import logging

class Tokenizer:
    """Tokenize a sentence string into a list of token strings.

    Thin wrapper around NLTK's ``WordPunctTokenizer``, which splits text
    into runs of alphabetic characters and runs of non-alphabetic
    characters (so punctuation becomes separate tokens,
    e.g. ``"can't"`` -> ``["can", "'", "t"]``).
    """

    def __init__(self):
        # WordPunctTokenizer needs no configuration; build it once and
        # reuse it for every tokenize() call.
        self.tokenizer = WordPunctTokenizer()

    def tokenize(self, sentence):
        """Split *sentence* (str) into tokens and return them as a list.

        The result is also emitted at DEBUG level for troubleshooting.
        """
        tokenized_sentence = self.tokenizer.tokenize(sentence)

        # Lazy %-formatting: the token list is only rendered when the
        # DEBUG level is actually enabled, and one record replaces the
        # old per-token logging loop.
        logging.debug("======tokenization result======: %s", tokenized_sentence)

        return tokenized_sentence
