from __future__ import division
import nltk
import corpus_reader
import random
from nltk.classify import apply_features

def test_classifier(cpd_threshold, *directories, **kwargs):
    """Train and evaluate a Naive Bayes sentiment classifier.

    Builds a merged corpus from ``directories``, selects feature words whose
    polarity score meets ``cpd_threshold``, trains an NLTK Naive Bayes
    classifier on word-presence features, and prints (and returns) its
    accuracy on a held-out test split.

    Parameters:
        cpd_threshold -- minimum polarity score, |2*p - n| / n, for a word
            to be kept as a feature (p = count in positive sentences,
            n = total count).
        *directories -- corpus directories handed to
            corpus_reader.build_corpus.
        test_size -- optional keyword argument: number of shuffled tagged
            sentences held out for testing (default 2000, matching the
            original hard-coded split).

    Returns:
        The classifier accuracy on the test split, as a float.
    """
    test_size = kwargs.pop('test_size', 2000)
    if kwargs:
        raise TypeError('unexpected keyword arguments: %r' % sorted(kwargs))

    corpora = tuple(corpus_reader.build_corpus(d) for d in directories)
    sentences = corpus_reader.merge_corpora(*corpora)

    # Frequency distributions over every word and over words appearing in
    # positive sentences.  NOTE(review): assumes the merged corpus object is
    # callable with these string keys -- confirm against corpus_reader.
    all_words = nltk.FreqDist(sentences('words'))
    positive_words = nltk.FreqDist(sentences('p_words'))

    # Polarity score in [0, 1]: 0 for a word split evenly between positive
    # and negative contexts, 1 for a word seen in only one polarity.
    # True division is in effect via the __future__ import.
    cpd = {word: abs(2 * positive_words[word] - all_words[word])
                 / all_words[word]
           for word in all_words}

    # Feature words: ordered by overall frequency, keeping only sufficiently
    # polar words that occur more than once (singletons are too noisy).
    frequent_words = sorted(all_words, key=lambda w: all_words[w],
                            reverse=True)
    word_features = [w for w in frequent_words
                     if cpd[w] >= cpd_threshold and all_words[w] > 1]

    def sentence_features(sentence):
        # Bag-of-words presence features over the selected feature words.
        sentence_words = set(sentence)
        features = {}
        for word in word_features:
            features['contains(%s)' % word] = (word in sentence_words)
        return features

    tagged_sentences = ([(s, 'positive') for s in sentences('p_sentences')]
                        + [(s, 'negative') for s in
                           sentences('n_sentences')])

    # Shuffle so the train/test split is not biased by corpus ordering.
    random.shuffle(tagged_sentences)

    # apply_features computes feature dicts lazily, so the full feature
    # matrix is never held in memory at once.
    training_set = apply_features(sentence_features,
                                  tagged_sentences[test_size:])
    test_set = apply_features(sentence_features,
                              tagged_sentences[:test_size])

    classifier = nltk.NaiveBayesClassifier.train(training_set)

    accuracy = nltk.classify.accuracy(classifier, test_set)
    # print() with a single argument behaves identically under Python 2
    # (where it parses as the print statement with parentheses) and 3.
    print(accuracy)
    return accuracy
