import nltk
import os
import pickle
import sys
from feature_manager import *
from feature_generators import *
from readability_tool import *
from nltk.classify.naivebayes import *

class AuthorClassifier:
  """Thin adapter over NLTK's NaiveBayesClassifier.

  Keeps the feature-generation code decoupled from the concrete
  classifier, so a different backend can be plugged in later without
  touching the feature pipeline.
  """

  def __init__(self, labelled_feature_tuples):
    self.labelled_feature_tuples = labelled_feature_tuples
    self.classifier = self.__train()

  def __train(self):
    """Fit the underlying Naive Bayes model on the stored tuples."""
    return NaiveBayesClassifier.train(self.labelled_feature_tuples)

  def classify(self, feature_set):
    """Return the single most likely label for feature_set."""
    return self.classifier.classify(feature_set)

  def prob_classify(self, feature_set):
    """Return a ProbDist with per-label probabilities for feature_set."""
    return self.classifier.prob_classify(feature_set)

  def labels(self):
    """Return the class labels seen during training."""
    return self.classifier.labels()

  def show_most_informative_features(self, num):
    """Delegate to the classifier's most-informative-features report."""
    return self.classifier.show_most_informative_features(num)

# A utility class to calculate the precision and recall values.
class AccuracyCalculator:
  """Accumulates (actual, predicted) label pairs and reports per-label
  precision and recall as percentages.

  Both self.precision and self.recall map a label to a dict with keys
  't' (total attempts attributed to that label) and 'r' (right, i.e.
  correct attempts).
  """

  def __init__(self):
    self.precision = {}
    self.recall = {}

  def _bump(self, counters, label, correct):
    """Increment the total (and, when correct, the right) count for label."""
    if label not in counters:
      counters[label] = {'t': 0, 'r': 0}
    if correct:
      counters[label]['r'] = counters[label]['r'] + 1
    counters[label]['t'] = counters[label]['t'] + 1

  # Method to add an instance of actual, predicted value.
  def add_tuple(self, actual, predicted):
    correct = actual == predicted
    # Precision is keyed by what we predicted; recall by the true label.
    self._bump(self.precision, predicted, correct)
    self._bump(self.recall, actual, correct)

  # Print precision and recall.
  def print_accuracy(self):
    print('PRECISION\n')
    for label in self.precision.keys():
      stats = self.precision[label]
      # Use float arithmetic: under Python 2, r*100/t floor-divides and
      # truncates the percentage (e.g. 66.00 instead of 66.67).
      print('%s: %0.2f\n' % (label, 100.0 * stats['r'] / stats['t']))

    print('RECALL\n')
    for label in self.recall.keys():
      stats = self.recall[label]
      print('%s: %0.2f\n' % (label, 100.0 * stats['r'] / stats['t']))

# Utility method that initiates the creation of feature vectors.
def CreateFeatureTuples(feature_manager, input_file, limit=-1):
  """Feed labelled documents to feature_manager.

  Args:
    feature_manager: object exposing add_tuple_from_text(text, label).
    input_file: iterable of lines, each the path of one labelled text file.
    limit: maximum number of files to process; non-positive means no limit.
      (Checked with >= so exactly `limit` files are processed, fixing the
      previous off-by-one that read limit+1 files.)
  """
  count = 0
  for line in input_file:
    if limit > 0 and count >= limit:
      break
    path = line.strip()
    # The label is the innermost directory containing the document.
    label = os.path.dirname(path).split('/')[-1]
    # Close each document promptly instead of leaking the file handle.
    with open(path, 'r') as document:
      feature_manager.add_tuple_from_text(document.read(), label)
    count = count + 1
    print('Created %d features!' % count)

def main():
  """Train the author classifier and report per-label accuracy.

  Command-line arguments:
    argv[1]: file listing training document paths, one per line.
    argv[2]: file listing test document paths, one per line.
    argv[3]: invocation tag used to name the serialized feature files
             written under ./model.
  """
  print('Running model for invocation: %s' % sys.argv[3])
  # Register every feature generator before building feature vectors.
  # (A previously serialized feature set can be loaded instead, e.g.
  # FeatureManager('./model/training.p.1').)
  feature_manager = FeatureManager()
  feature_manager.register_feature_generator(StemmedTrigramGenerator())
  feature_manager.register_feature_generator(PosNTagGenerator())
  feature_manager.register_feature_generator(ARIIndexGenerator())
  feature_manager.register_feature_generator(SmogIndexGenerator())
  feature_manager.register_feature_generator(FleschReadingEaseIndexGenerator())
  feature_manager.register_feature_generator(FleschKincaidGradeLevelIndexGenerator())
  feature_manager.register_feature_generator(GunningFogIndexGenerator())
  feature_manager.register_feature_generator(ColemanLiauIndexGenerator())

  # Build and persist the training features; the listing file is closed
  # promptly instead of leaking the handle.
  with open(sys.argv[1], 'r') as training_listing:
    CreateFeatureTuples(feature_manager, training_listing)
  feature_manager.store_features('./model/training.p.' + sys.argv[3])

  # Feature tuples used to train the model.
  labelled_feature_tuples = feature_manager.get_tuples()

  # Reuse the manager (reset state) for the held-out test data.
  feature_manager.reset()
  with open(sys.argv[2], 'r') as test_listing:
    CreateFeatureTuples(feature_manager, test_listing)
  test_tuples = feature_manager.get_tuples()
  feature_manager.store_features('./model/test.p.' + sys.argv[3])

  classifier = AuthorClassifier(labelled_feature_tuples)
  classifier.show_most_informative_features(100)
  print('%s\n' % classifier.labels())

  # Score every test sample and accumulate precision/recall counts.
  # (Unpack instead of indexing a variable named `tuple`, which shadowed
  # the builtin.)
  accuracy_calculator = AccuracyCalculator()
  for feature_set, actual in test_tuples:
    predicted = classifier.classify(feature_set)
    print('%s, %s\n' % (actual, predicted))
    accuracy_calculator.add_tuple(actual, predicted)
    prob_dist = classifier.prob_classify(feature_set)
    for author in classifier.labels():
      print('%s => %0.3f\n' % (author, prob_dist.prob(author)))
    print('======================\n')

  accuracy_calculator.print_accuracy()
  return 0

main()
