#! /usr/bin/python
#
# Stephen Poletto (spoletto)
# Peter Wilmot (pbwilmot)
# CSCI1580 - Web Search
# Spring 2011 - Brown University
#
# Classifies an input list of documents
# using training data and document-vectors 
# generated by vecrep.py.
#
# Allows for both Multinomial Naive Bayes
# (MNB) and Rocchio classifier techniques
# to be used. The algorithm to use should
# be specified in the command-line args.

from documentVector import *
import numpy
import math
import sys

# Usage examples:
# ./classify.sh -mnb features.dat vecrep.dat training.dat docs.dat resultsMNB.dat
# ./classify.sh -r features.dat vecrep.dat training.dat docs.dat resultsRocchio.dat

# The categories we're using (from categories.dat) are:
# 0 Theoretical_computer_science
# 1 Artificial_intelligence
# 2 Computational_science
# 3 Computer_graphics
# 4 Computer_scientists
# 5 Computer_science_stubs
# 6 Computer_security
# 7 Algorithms
# 8 Software_engineering
# 9 Programming_languages

if (len(sys.argv) != 7):
    print ""
    print "usage: classify -<mnb|r> <features> <vecrep> <training> <docs> <results>"
    print ""
    sys.exit()

classificationAlgo = sys.argv[1]
if (classificationAlgo != "-mnb" and classificationAlgo != "-r"):
    print ""
    print "usage: classify -<mnb|r> <features> <vecrep> <training> <docs> <results>"
    print "only -mnb (multinomial naive bayes) and -r (rocchio) classifiers may be used."
    print ""
    sys.exit()
    
########################## BEGIN INITIAL SETUP ##########################

# Open the five data files named on the command line.
featuresFile = open(sys.argv[2], "r")        # one feature per line (counted below)
vecrepFile = open(sys.argv[3], "r")          # per-document feature vectors from vecrep.py
trainingFile = open(sys.argv[4], "r")        # "docID classID" pairs of pre-classified docs
docsToClassifyFile = open(sys.argv[5], "r")  # docIDs we must classify, one per line
outputFile = open(sys.argv[6], 'wb')         # destination for "docID classID" results

# Determine the dimensionality of the document vectors: each vector is
# 1 x n where n is the number of features, i.e. the number of lines in
# the features file.
featureCount = sum(1 for _ in featuresFile)
featuresFile.close()

# Build a DocumentVector for every document emitted by vecrep. Each
# DocumentVector holds both a normalized vector and a raw feature-count
# vector, keyed here by its docID.
docIDToVector = {}
for line in vecrepFile:
    stripped = line.rstrip("\n")
    # NOTE(review): the [:-1] drops the final character of the already
    # newline-stripped line — presumably a trailing delimiter in the
    # vecrep format; confirm against vecrep.py's output.
    docVector = DocumentVector(stripped[:-1], featureCount)
    docIDToVector[docVector.docID] = docVector
vecrepFile.close()

# Map each class ID to the list of docIDs the training data assigns to
# that class. Each training line is a "docID classID" pair.
classIDToPreclassifiedDocs = {}
for trainingLine in trainingFile:
    fields = trainingLine.split(' ')
    labeledDocID = int(fields[0])
    labeledClassID = int(fields[1])
    if labeledClassID not in classIDToPreclassifiedDocs:
        classIDToPreclassifiedDocs[labeledClassID] = []
    classIDToPreclassifiedDocs[labeledClassID].append(labeledDocID)
trainingFile.close()

# Compute the total number of documents in the training collection
# (the sum of the class membership list sizes).
totalNumberOfDocs = sum(len(members) for members in classIDToPreclassifiedDocs.values())

# Build a list of all the docIDs for documents we've been asked to
# classify (one docID per line). int() tolerates the trailing newline,
# so no explicit rstrip is needed.
unknownDocs = [int(line) for line in docsToClassifyFile]
# Fix: the original never closed this handle; release it like the
# other input files.
docsToClassifyFile.close()

########################## END INITIAL SETUP ##########################

########################## BEGIN TRAINING FUNCTIONS ################### 

# Compute the (non-log) idf for each feature, for use in tf-idf
# calculations: totalNumberOfDocs / (number of training docs containing
# the feature). Features that appear in no training document keep an
# idf of 1.0. Returns a numpy array of length featureCount.
def compute_feature_id_to_idf():
    docFrequency = {}
    for memberDocs in classIDToPreclassifiedDocs.values():
        for docID in memberDocs:
            for featureID in docIDToVector[docID].featureIDToNumOccurrences:
                docFrequency[featureID] = docFrequency.get(featureID, 0) + 1
    featureIDToIDF = numpy.ones(featureCount)
    for featureID in docFrequency:
        featureIDToIDF[featureID] = (totalNumberOfDocs + 0.0) / docFrequency[featureID]
    return featureIDToIDF

# Rocchio training: the centroid of each class is the center of mass of
# its member documents' normalized tf-idf vectors — the sum of the
# members' normalized vectors divided by the member count. Returns a
# dict mapping classID -> centroid vector (length featureCount).
def train_rocchio(logOfFeatureIDToIDF):
    classIDToCentroid = {}
    for classID, memberDocs in classIDToPreclassifiedDocs.items():
        vectorSum = numpy.zeros(featureCount)
        for docID in memberDocs:
            vectorSum += docIDToVector[docID].normalizedTFIDFVector(logOfFeatureIDToIDF)
        # Same arithmetic order as before: scale by the reciprocal of
        # the class size.
        classIDToCentroid[classID] = (1.0 / len(memberDocs)) * vectorSum
    return classIDToCentroid
        

# Multinomial Naive Bayes training. Builds two dictionaries:
#   classID -> log10 prior probability (class size / collection size)
#   classID -> vector of log10 conditional probabilities, one slot per
#              feature, using add-one (Laplace) smoothing.
def train_multinomial_nb():
    logPriors = {}
    logConditionals = {}
    for classID, docsInClass in classIDToPreclassifiedDocs.items():
        logPriors[classID] = numpy.log10((len(docsInClass) + 0.0) / totalNumberOfDocs)
        # Sum raw feature counts across every document in the class.
        classTokenCounts = numpy.zeros(featureCount)
        for docID in docsInClass:
            classTokenCounts += docIDToVector[docID].countVector()
        # Laplace smoothing: +1 per feature in the numerator, so the
        # denominator grows by featureCount.
        smoothedDenominator = numpy.sum(classTokenCounts) + featureCount
        logConditionals[classID] = numpy.log10((classTokenCounts + 1.0) / smoothedDenominator)
    return (logPriors, logConditionals)
      
########################## END TRAINING FUNCTIONS ################### 
        
########################## BEGIN MAIN CLASSIFICATION SCRIPT #########

if classificationAlgo == "-mnb":
    # Multinomial Naive Bayes: score each class as
    # log prior + sum of log conditional probabilities over the
    # features present in the document; pick the highest score.
    (logPriors, logConditionals) = train_multinomial_nb()
    for docID in unknownDocs:
        countVector = docIDToVector[docID].countVector()
        bestClassID = None
        bestScore = -1.0 * sys.maxint
        for classID in logPriors:
            # Only features that actually occur in the doc contribute;
            # absent features contribute 0 (equivalent to the original
            # putmask onto a zero vector).
            contributions = numpy.where(countVector > 0, logConditionals[classID], 0.0)
            score = logPriors[classID] + numpy.sum(contributions)
            if bestClassID is None or score > bestScore:
                bestClassID = classID
                bestScore = score
        outputFile.write("%s %s\n" % (docID, bestClassID))

elif classificationAlgo == "-r":
    # Rocchio: classify each document as the class whose centroid is
    # nearest (smallest Euclidean distance) to the document's
    # normalized tf-idf vector.
    logIDF = numpy.log10(compute_feature_id_to_idf())
    classIDToCentroid = train_rocchio(logIDF)
    for docID in unknownDocs:
        docVector = docIDToVector[docID].normalizedTFIDFVector(logIDF)
        bestClassID = None
        bestDistance = 0
        for classID in classIDToCentroid:
            distance = numpy.linalg.norm(classIDToCentroid[classID] - docVector)
            if bestClassID is None or distance < bestDistance:
                bestClassID = classID
                bestDistance = distance
        # Record how we decided to classify this doc.
        outputFile.write("%s %s\n" % (docID, bestClassID))

outputFile.close()

########################## END MAIN CLASSIFICATION SCRIPT #########

    