from __future__ import division
import nltk
import corpus_reader
import random


# Build the corpus accessor; 'clean-data' is presumably the directory of
# preprocessed corpus files -- TODO confirm against corpus_reader.build_corpus.
# `sentences` looks like a callable keyed by section name ('words',
# 'p_words', 'p_sentences', 'n_sentences'), based on how it is used below.
sentences = corpus_reader.build_corpus('clean-data')

# Frequency distributions (word -> occurrence count) over all tokens and
# over the tokens drawn from the positive subset of the corpus.
all_words = nltk.FreqDist(sentences('words'))
positive_words = nltk.FreqDist(sentences('p_words'))

# Per-word polarity skew score in [0, 1]: |2*pos_count - total_count| / total_count.
# If total == pos + neg this equals |pos - neg| / total -- TODO confirm that
# 'words' is exactly the union of the positive and negative tokens.
cpd = {word: abs(2 * positive_words[word] - count) / count
       for word, count in all_words.items()}

# All words, ordered from most to least frequent overall.
frequent_words = sorted(all_words, key=all_words.get, reverse=True)

# Keep only strongly polarity-skewed words (score >= 0.25), then take the
# 4000 most frequent of those as the classifier's feature vocabulary.
high_cpd = [word for word in frequent_words if cpd[word] >= 0.25]
word_features = high_cpd[:4000]

def sentence_features(sentence, vocabulary=None):
    """Map a sentence to a bag-of-words presence feature dict.

    Args:
        sentence: an iterable of word tokens.
        vocabulary: optional iterable of feature words; defaults to the
            module-level `word_features` list, preserving the original
            call signature for existing callers.

    Returns:
        dict mapping 'contains(<word>)' -> bool, one entry per vocabulary
        word, True when the word occurs anywhere in `sentence`.
    """
    if vocabulary is None:
        vocabulary = word_features
    # Set membership makes each lookup O(1) instead of scanning the list.
    sentence_words = set(sentence)
    return {'contains(%s)' % word: (word in sentence_words)
            for word in vocabulary}

# Label every positive sentence 'positive' and every negative sentence
# 'negative', each paired with its bag-of-words feature dict.
featuresets = ([(sentence_features(d), 'positive') for d in
                sentences('p_sentences')] +
               [(sentence_features(d), 'negative') for d in
                sentences('n_sentences')])

# Shuffle so the train/test split below is not ordered by class.
# NOTE(review): no random.seed() -- accuracy varies run to run; confirm
# whether reproducible splits are wanted here.
random.shuffle(featuresets)

# Hold out the first 2000 shuffled examples for evaluation; train on the rest.
training_set, test_set = featuresets[2000:], featuresets[:2000]

classifier = nltk.NaiveBayesClassifier.train(training_set)

# Parenthesized single-argument print behaves identically under Python 2
# (this file's dialect, per the bare-print original) and Python 3.
print(nltk.classify.accuracy(classifier, test_set))
