"""Provides functions for splitting sentences and tokenising words therein
including stop-list and Porter-stemming functionality."""

import re
from PorterStemmer import stem

# Module-level cache mapping each stopword to its Porter stem.
# Populated lazily by load_stoplist() on first tokenisation.
stoplist = {}

def load_stoplist():
    """Populate the module-level `stoplist` dict from the file "stoplist".

    Each non-blank, non-comment line is treated as one stopword; the dict
    maps the raw stopword to its Porter stem.
    """
    # `with open(...)` replaces the deprecated `file()` builtin and
    # guarantees the handle is closed even if stem() raises.
    with open("stoplist", "r") as f:
        for row in f:
            stopword = row.strip()
            # Check after stripping: a whitespace-only line is still truthy
            # before strip() and would otherwise insert "" into the stoplist.
            if stopword and not stopword.startswith("#"):
                stoplist[stopword] = stem(stopword)

class Tokeniser:
    """Splits a text into word tokens, with optional Porter stemming and
    stop-word removal.

    After a full pass of tokenise(remove_stop=True) has been consumed,
    `stop_ratio` holds the fraction of tokens that were KEPT (i.e. not
    stopwords); until then it stays at its initial value of 1.00.
    """

    # Compiled once at class-definition time; a "word" is a maximal run of
    # ASCII letters, so digits and punctuation act purely as separators.
    _WORD_RE = re.compile(r'[a-zA-Z]+')

    def __init__(self, text):
        self.text = text
        # Fraction of tokens kept by the last stop-removing tokenisation.
        # Despite the name, this is kept/total, not stopped/total.
        self.stop_ratio = 1.00

    def tokenise(self, do_stem=True, remove_stop=True):
        """Yield word tokens from self.text.

        do_stem     -- if true, yield the Porter stem of each token.
        remove_stop -- if true, drop tokens found in the stoplist and
                       update self.stop_ratio once the generator is
                       exhausted.
        """
        # Lazily load the stoplist on first use (regardless of remove_stop,
        # matching the original behaviour).
        if not stoplist:
            load_stoplist()

        n_toks = 0
        n_kept = 0
        # Single loop for both modes; previously the findall loop was
        # duplicated in each branch.
        for token in self._WORD_RE.findall(self.text):
            n_toks += 1
            # NOTE(review): membership test is on the raw (unstemmed,
            # case-sensitive) token, as in the original — capitalised
            # stopwords are not removed; confirm this is intended.
            if remove_stop and token in stoplist:
                continue
            n_kept += 1
            yield stem(token) if do_stem else token

        if remove_stop:
            # Empty input leaves the ratio at 1.00 rather than dividing by zero.
            self.stop_ratio = float(n_kept) / n_toks if n_toks else 1.00
                
# Matches a parenthesised suffix at the end of the string, greedily from the
# first "(" onward, e.g. "heart attack (suspected)" -> "(suspected)".
SUFFIX_RE = re.compile(r'\(.*\)$')

def removeSuffix(text):
    """Return *text* with any trailing parenthesised suffix removed.

    Surrounding whitespace is stripped both before matching and after the
    suffix is cut away.
    """
    trimmed = text.strip()
    without_suffix = SUFFIX_RE.sub('', trimmed)
    return without_suffix.strip()

if __name__ == "__main__":
    # Smoke test: strip any parenthesised suffix, then tokenise without
    # stemming (do_stem=False); stop-word removal stays on by default.
    # Requires a "stoplist" file in the working directory.
    text = "patient is dead."
    tkn = Tokeniser(removeSuffix(text))
    for token in tkn.tokenise(False):
        # print(x) with a single argument behaves identically as a Python 2
        # statement-with-parens and as the Python 3 function call.
        print(token)
    print(tkn.stop_ratio)
    
