import math
import nltk
import os
import pickle
import sys
import threading
import thread
from feature_manager import *
from feature_generators import *
from readability_tool import *
from nltk.classify.naivebayes import *
from nltk.classify.maxent import *

# A wrapper class on top of classifier. This was done so that
# we can plug-in new classifier without changing much of the feature
# generation code.
# A wrapper class on top of classifier. This was done so that
# we can plug-in new classifier without changing much of the feature
# generation code.
class AuthorClassifier:
  def __init__(self, classifier_file=None):
    """Optionally load previously trained classifiers from a pickle file.

    Args:
      classifier_file: path to a pickle written by serialize_classifier();
        when None, start with no trained classifiers.
    """
    self.classifiers = {}  # name -> trained classifier object
    self.trainers = {}     # name -> callable(train_tuples) -> classifier
    if classifier_file:
      # 'rb' because pickle data is binary; 'with' avoids leaking the handle.
      with open(classifier_file, 'rb') as f:
        self.classifiers = pickle.load(f)

  def register_classifier(self, name, trainer):
    """Register a training callable under the given classifier name."""
    self.trainers[name] = trainer

  def train(self, classifier_feat_mgr_dict):
    """Train every registered classifier on its feature manager's tuples.

    Args:
      classifier_feat_mgr_dict: dict mapping classifier name to a
        FeatureManager whose get_tuples() returns (features, label) pairs.
    """
    for name in self.trainers:
      print('Training %s' % name)
      self.classifiers[name] = self.trainers[name](
          classifier_feat_mgr_dict[name].get_tuples())

  # Returns the label of the classified class.
  def classify(self, feature_set):
    """Return the label chosen by majority vote across all classifiers.

    A tie on vote count is broken by the product of the per-classifier
    probabilities accumulated for each label.

    Args:
      feature_set: dict mapping classifier name to that classifier's
        feature dict for the document being classified.
    """
    label_cnt = {}   # label -> number of classifiers voting for it
    label_prob = {}  # label -> product of its voters' probabilities
    max_label = ''
    max_prob = 0.0
    max_cnt = 0
    for name in self.classifiers:
      classifier = self.classifiers[name]
      print('Classifier: %s' % name)
      feature_set_for_classifier = feature_set[name]
      label = classifier.classify(feature_set_for_classifier)
      prob_dist = classifier.prob_classify(feature_set_for_classifier)
      for author in classifier.labels():
        print('%s => %0.3f\n' % (author, prob_dist.prob(author)))

      prob = prob_dist.prob(label)
      print('Prediction: %s, prob = %0.3f' % (label, prob))
      label_cnt[label] = label_cnt.get(label, 0) + 1
      label_prob[label] = label_prob.get(label, 1) * prob

      # Prefer the label with the most votes; break a vote tie on the
      # accumulated probability product.
      if (label_cnt[label] > max_cnt or
          (label_cnt[label] == max_cnt and label_prob[label] > max_prob)):
        max_cnt = label_cnt[label]
        max_prob = label_prob[label]
        max_label = label

    return max_label

  # Returns the labels of classes that were fed to the classifier.
  def labels(self):
    """Return a dict mapping classifier name to its list of known labels."""
    label_dict = {}
    for name in self.classifiers:
      label_dict[name] = self.classifiers[name].labels()
    return label_dict

  def serialize_classifier(self, filename):
    """Pickle the trained classifiers to filename ('wb': pickle is binary)."""
    with open(filename, 'wb') as f:
      pickle.dump(self.classifiers, f)

# A utility class to calculate the precision and recall values.
# A utility class to calculate the precision and recall values.
class AccuracyCalculator:
  def __init__(self):
    # Per-label counters: each value is {'t': total seen, 'r': right}.
    self.precision = {}  # keyed by predicted label
    self.recall = {}     # keyed by actual label

  # Method to add an instance of actual, predicted value.
  def add_tuple(self, actual, predicted):
    """Record one (actual, predicted) observation.

    Precision counters are keyed by the predicted label, recall counters
    by the actual label; 'r' counts correct predictions, 't' totals.
    """
    # Precision: of everything predicted as this label, how much was right.
    if predicted not in self.precision:
      self.precision[predicted] = {'t': 0, 'r': 0}
    if actual == predicted:
      self.precision[predicted]['r'] += 1
    self.precision[predicted]['t'] += 1

    # Recall: of everything that is actually this label, how much was found.
    if actual not in self.recall:
      self.recall[actual] = {'t': 0, 'r': 0}
    if actual == predicted:
      self.recall[actual]['r'] += 1
    self.recall[actual]['t'] += 1

  # Print precision and recall.
  def print_accuracy(self):
    """Print per-label precision and recall percentages."""
    print('PRECISION\n')
    for key in self.precision:
      stats = self.precision[key]
      # 100.0 forces float division; the original 'r*100/t' truncated the
      # percentage under Python 2 integer division despite the %0.2f format.
      print('%s: %0.2f\n' % (key, 100.0 * stats['r'] / stats['t']))

    print('RECALL\n')
    for key in self.recall:
      stats = self.recall[key]
      print('%s: %0.2f\n' % (key, 100.0 * stats['r'] / stats['t']))

# Utility method that initiates the creation of feature vectors.
# Utility method that initiates the creation of feature vectors.
def CreateFeatureTuples(classifier_feat_mgr_dict, input_file, limit=-1):
  """Feed each document listed in input_file to every feature manager.

  Args:
    classifier_feat_mgr_dict: dict mapping classifier name to a
      FeatureManager; every manager receives every document.
    input_file: iterable of lines, each a path to a document whose parent
      directory name is the author label.
    limit: maximum number of documents to process; <= 0 means no limit.
  """
  count = 0
  for line in input_file:
    # Fix: the original used 'count > limit', which processed limit+1 docs.
    if limit > 0 and count >= limit:
      break
    line = line.strip()
    # The author label is the name of the document's parent directory.
    label = os.path.basename(os.path.dirname(line))
    # Read once (capped at 500KB) instead of re-opening per manager, and
    # close the handle promptly.
    with open(line, 'r') as doc:
      text = doc.read(500000)
    for name in classifier_feat_mgr_dict:
      classifier_feat_mgr_dict[name].add_tuple_from_text(text, label)
    count += 1
    print('Created %d features!' % count)

# Utility method to create the individual classifiers.
# Utility method to create the individual classifiers.
def CreateClassifiers(classifier_dict):
  """Build an AuthorClassifier plus one FeatureManager per classifier.

  Args:
    classifier_dict: dict mapping classifier name to a
      (feature_generator_list, trainer_callable) pair.

  Returns:
    (AuthorClassifier with every trainer registered,
     dict mapping classifier name to its FeatureManager).
  """
  classifier = AuthorClassifier()
  classifier_feat_mgr_dict = {}
  for name in classifier_dict:
    feat_mgr = FeatureManager()
    generators, trainer = classifier_dict[name]
    for generator in generators:
      feat_mgr.register_feature_generator(generator)
    # Fix: these two lines were inside the generator loop, so registration
    # ran redundantly once per generator and a classifier with an empty
    # generator list was never registered at all.
    classifier.register_classifier(name, trainer)
    classifier_feat_mgr_dict[name] = feat_mgr
  return (classifier, classifier_feat_mgr_dict)

# Utility method to create feature generators.
# Utility method to create feature generators.
def CreateFeatureManagers(classifier_dict):
  """Build a fresh FeatureManager (with its generators) per classifier.

  Args:
    classifier_dict: dict mapping classifier name to a
      (feature_generator_list, trainer_callable) pair; the trainer is
      ignored here.

  Returns:
    dict mapping classifier name to its configured FeatureManager.
  """
  classifier_feat_mgr_dict = {}
  for name in classifier_dict:
    feat_mgr = FeatureManager()
    generators, _trainer = classifier_dict[name]
    for generator in generators:
      feat_mgr.register_feature_generator(generator)
    # Fix: this assignment was inside the generator loop, so it ran
    # redundantly per generator and an empty generator list produced no
    # entry for the classifier.
    classifier_feat_mgr_dict[name] = feat_mgr
  return classifier_feat_mgr_dict

def main():
  """Train the per-feature classifiers, test them, and report accuracy.

  Command line:
    sys.argv[1]: file listing training document paths (one per line).
    sys.argv[2]: file listing test document paths (one per line).
    sys.argv[3]: invocation tag used for logging / serialized-model names.
  """
  print('Running model for invocation: %s' % sys.argv[3])

  # One classifier per feature family; all trained with maxent.
  classifier_dict = {
    'ngram_classifier': ([StemmedTrigramGenerator()], MaxentClassifier.train),
    'readability_classifier': ([GenericReadingIndexGenerator()], MaxentClassifier.train),
    'pos_classifier': ([PosNTagGenerator()], MaxentClassifier.train),
    'stopwords_classifier': ([StopWordsFrequencyGenerator()], MaxentClassifier.train),
    'tfiaf_classifier': ([TfIafGenerator()], MaxentClassifier.train),
    }

  # ****************** TRAINING *****************
  classifier, classifier_feat_mgr_dict = CreateClassifiers(classifier_dict)

  # Create all feature tuples for each classifier.
  with open(sys.argv[1], 'r') as train_file:
    CreateFeatureTuples(classifier_feat_mgr_dict, train_file)

  classifier.train(classifier_feat_mgr_dict)

  # Optionally persist the trained models for reuse; reload later with
  # AuthorClassifier('./classifier_data.txt.<tag>').
  # classifier.serialize_classifier('./classifier_data.txt.' + sys.argv[3])

  # ***************** TESTING ****************
  # Fresh feature managers so test tuples are not mixed with training ones.
  classifier_feat_mgr_dict = CreateFeatureManagers(classifier_dict)
  with open(sys.argv[2], 'r') as test_file:
    CreateFeatureTuples(classifier_feat_mgr_dict, test_file)

  # ***************** ACCURACY *****************
  accuracy_calculator = AccuracyCalculator()
  # Regroup the per-classifier tuples so that each document index maps to
  # {classifier name: feature dict}, with one shared label per document.
  feature_tuples = {}
  label = {}
  for name in classifier_feat_mgr_dict:
    count = 0
    # 'feat_tuple' (not 'tuple') to avoid shadowing the builtin.
    for feat_tuple in classifier_feat_mgr_dict[name].get_tuples():
      key = str(count)
      if key not in feature_tuples:
        feature_tuples[key] = {}
      feature_tuples[key][name] = feat_tuple[0]
      label[key] = feat_tuple[1]
      count += 1

  for idx in feature_tuples:
    feature_set = feature_tuples[idx]
    predicted = classifier.classify(feature_set)
    actual = label[idx]
    print('%s, %s\n' % (actual, predicted))
    accuracy_calculator.add_tuple(actual, predicted)
    print('======================\n')

  accuracy_calculator.print_accuracy()
  return 0

main()
