from functools import *
from toolz.curried import *
from languagetools import *
from nltk.tokenize import sent_tokenize

def body_char_count(article):
    """Total number of characters across all paragraphs of the article body.

    ``article['body']`` is treated as a sequence of paragraph strings
    (consistent with body_para_count / body_sent_count below).
    Returns 0 for an empty body.
    """
    # def instead of a named lambda (PEP 8 E731); builtin sum replaces
    # the toolz pipe with identical results.
    return sum(len(paragraph) for paragraph in article['body'])

def body_para_count(article):
    """Number of paragraphs in the article body (one list entry per paragraph)."""
    # def instead of a named lambda (PEP 8 E731); pipe(x, len) is just len(x).
    return len(article['body'])

def body_sent_count(article):
    """Total number of sentences across all paragraphs of the article body.

    Each paragraph is split with nltk's sent_tokenize and the per-paragraph
    sentence counts are summed. Returns 0 for an empty body.
    """
    # The original reduce(operator.add) raised TypeError on an empty body
    # (curried reduce with no initializer) and relied on `operator` being
    # wildcard-exported by toolz.curried; builtin sum fixes both.
    return sum(len(sent_tokenize(paragraph)) for paragraph in article['body'])

# Article-level character-count threshold used by has_enough_text /
# has_too_little_text below.
article_minimum_length = 250

def has_enough_text(article):
    """Minimum-length predicate on the article's total body character count.

    NOTE(review): `less_than` comes from languagetools and is not visible
    here. If it follows curried-operator semantics (less_than(t)(x) == t < x)
    this returns True when char_count exceeds article_minimum_length; if it
    means x < t instead, the predicate is inverted — confirm against
    languagetools.
    """
    # def instead of a named lambda (PEP 8 E731); pipe(x, f) is exactly f(x),
    # so behavior is unchanged.
    return less_than(article_minimum_length)(body_char_count(article))

def has_too_little_text(article):
    """Logical negation of has_enough_text for the same article."""
    # Equivalent to toolz.complement(has_enough_text): returns not f(x).
    return not has_enough_text(article)

# Paragraph-level character-count threshold used by para_long_enough below.
para_minimum_length = 100

def para_long_enough(para):
    """Minimum-length predicate on a single paragraph's character count.

    NOTE(review): `less_than` comes from languagetools and is not visible
    here — see the matching note on has_enough_text; the same possible
    argument-order inversion applies.
    """
    # def instead of a named lambda (PEP 8 E731); pipe(x, f) is exactly f(x).
    return less_than(para_minimum_length)(len(para))

def features(article):
    """Compute simple length statistics for an article.

    Returns a dict with the raw counts (characters, sentences, paragraphs)
    plus the derived per-paragraph and per-sentence averages.

    NOTE(review): divides by para_count and sent_count, so this raises
    ZeroDivisionError when the body has no paragraphs or no sentences —
    presumably callers filter such articles first (e.g. via
    has_too_little_text); confirm.
    """
    stats = {
        'char_count': body_char_count(article),
        'sent_count': body_sent_count(article),
        'para_count': body_para_count(article),
    }
    stats['chars_in_para'] = stats['char_count'] / stats['para_count']
    stats['sents_in_para'] = stats['sent_count'] / stats['para_count']
    stats['chars_in_sent'] = stats['char_count'] / stats['sent_count']
    return stats
