#title          : trainAndTestWithDiffClasses.py
#description    : train and test for the various classes
#author         : Arthi Ramachandran
#date           : 20140203 
#usage          : python ./trainAndTestWithDiffClasses.py <features file> <classes file> <training file> <test file>
#python_version : 2.7
#============================================================================ 

print "importing"
import sys
#from numpy import recfromcsv
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, accuracy_score, classification_report
from sklearn import preprocessing
import datetime
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
from sklearn.multiclass import OneVsRestClassifier

print "done importing"

# Command-line arguments (see the usage line in the file header):
#   argv[1]: features TSV, argv[2]: classes TSV,
#   argv[3]: training-set CSV, argv[4]: test-set CSV
features_filename = sys.argv[1]
classes_filename = sys.argv[2]
training_set_filename = sys.argv[3]
test_set_filename = sys.argv[4]

# Echo the arguments so runs are identifiable in captured stdout.
print features_filename, classes_filename, training_set_filename, test_set_filename

# can ignore the first couple of columns because actually what we're trying to predict is the last column number
def read_tsv(file_path, has_header = True):
    """Read a tab-separated file into a list of row lists.

    Each cell is converted to float when possible; empty cells become 0.0;
    non-numeric cells are kept as strings.

    Parameters
    ----------
    file_path : str
        Path of the TSV file to read.
    has_header : bool
        If True, the first line is consumed as the column names.

    Returns
    -------
    (rows, col_names)
        rows is a list of per-line value lists; col_names is the list of
        header names, or None when has_header is False.
        (Fix: the original left col_names unbound when has_header was
        False, so the return raised UnboundLocalError.)
    """
    col_names = None
    with open(file_path) as f:
        if has_header:
            col_names = f.readline().strip().split("\t")
        X = []
        count = 0
        for line in f:
            cells = line.strip().split("\t")
            entry = []
            for cell in cells:
                if not cell:
                    # Empty field -> numeric zero (kept from original).
                    cell = 0
                try:
                    entry.append(float(cell))
                except (TypeError, ValueError):
                    # Non-numeric cell: keep the raw string.
                    entry.append(cell)
            X.append(entry)
            count += 1
            if count % 100000 == 0:
                # Progress indicator for very large files.
                print(count)
    return X, col_names

def read_csv(file_path, has_header=False):
    """Read a comma-separated file and return a set of "col0 col1" keys.

    Only the first two columns of each line are used; they are joined with
    a single space to form a membership key (e.g. "user url").

    Parameters
    ----------
    file_path : str
        Path of the CSV file.
    has_header : bool
        Accepted for interface symmetry with read_tsv; the header line,
        if any, is treated like a data line (unchanged from the original).

    Returns
    -------
    set of str
    """
    keys = set()
    with open(file_path) as f:
        for line in f:
            cols = line.strip().split(",")
            # Fix: skip blank/short lines instead of crashing with
            # IndexError on cols[1].
            if len(cols) < 2:
                continue
            keys.add(cols[0] + " " + cols[1])
    return keys

def get_class_labels(y_values):
    """Bucket raw post counts into four ordinal class labels.

    Mapping: y == 0 -> 0, y < 10 -> 1, y < 100 -> 2, otherwise -> 3.
    Returns a list of labels aligned with y_values.
    """
    def bucket(count):
        if count == 0:
            return 0
        if count < 10:
            return 1
        return 2 if count < 100 else 3

    return [bucket(count) for count in y_values]

# returns a list of features (with descriptors) for elements present in the dataset
def get_features_for_set(data_set, features):
    X = []
    #print len(data_set)
    #print "test?", "dvusa www.nytimes.com/2012/12/19/business/gramercy-funds-in-middle-of-argentinas-debt-battle.html" in data_set
    for f in features:
        index = str(f[0]) + " " + str(f[1])
        #index = index_func(f[0], f[1])
        if index in data_set:
            #print index
            X.append(f)
            #print X
        #else:
        #    print index
    return X

def get_classes_for_set(data_set, features):
    """Map url -> class row for every url present in the data set.

    data_set holds "user url" keys; only the url part (second token) is
    matched against the first column of each row in features. If several
    rows share a url, the last one wins (same as the original loop).
    """
    wanted_urls = {entry.split(" ")[1] for entry in data_set}
    return {row[0]: row for row in features if row[0] in wanted_urls}

def features_index_func(col1, col2):
    """Build the "user url" membership key for the features set."""
    return " ".join((col1, col2))
def classes_index_func(col1, col2):
    # Classes are keyed by URL alone; col2 is accepted only so the
    # signature matches features_index_func (currently unused — the only
    # call site is commented out in get_features_for_set).
    return col1

def get_data_set(features, classes, features_to_use, class_to_use,
                 feature_names=None, class_names=None):
    """Build the (X, y) arrays for one prediction target.

    Parameters
    ----------
    features : list of rows; row[1] is the url used to join with classes.
    classes : dict mapping url -> class row (from get_classes_for_set).
    features_to_use : names of the feature columns to extract.
    class_to_use : name of the class column to predict.
    feature_names : column names for feature rows; defaults to the
        module-level feature_labels (the original's hidden global).
    class_names : column names for class rows; defaults to the
        module-level class_labels.

    Returns
    -------
    (X, y) : feature matrix and target list, aligned row-for-row; rows
        whose url has no entry in classes are dropped.
    """
    # Backward-compatible generalization: fall back to the module globals
    # the original version read implicitly.
    if feature_names is None:
        feature_names = feature_labels
    if class_names is None:
        class_names = class_labels

    feature_cols = [feature_names.index(name) for name in features_to_use]
    class_col = class_names.index(class_to_use)

    X = []
    y = []
    for row in features:
        url = row[1]  # rows are (user, url, feature values...)
        if url in classes:
            X.append([row[i] for i in feature_cols])
            y.append(classes[url][class_col])
    return X, y

def save(path, ext='png', close=True):
    """Save the current matplotlib figure to "<path>.<ext>".

    The figure is closed afterwards unless close is False.
    """
    plt.savefig("%s.%s" % (path, ext))
    if close:
        plt.close()
 

print "reading data"
# features: rows of per-(user, url) feature values; feature_labels: their column names.
features, feature_labels = read_tsv(features_filename)
print feature_labels
# classes: rows of per-url target values; class_labels: their column names.
classes, class_labels =  read_tsv(classes_filename)

training_set = read_csv(training_set_filename) # user, url, date of posting -> set(user+url)
test_set = read_csv(test_set_filename)

# Restrict features/classes to the rows belonging to each split.
# X_*_all are row lists; y_*_all are dicts mapping url -> class row.
X_train_all = get_features_for_set(training_set, features)
y_train_all = get_classes_for_set(training_set, classes)
X_test_all = get_features_for_set(test_set, features)
y_test_all = get_classes_for_set(test_set, classes)

# The three feature groups compared below: baseline, selectivity, and their union.
baseline_features = ['days_since_creation', 'reciprocal_links', 'num_friends', 'total_pop', 'num_followers', 'avg_activity', 'num_tweets']
selectivity_features = ["num_tweets_received", "avg_ext_pop_of_posts", "percent_tweets_posted"]

# For every target column in the classes file (except URL-like columns),
# train and evaluate a linear SVM with each of the three feature groups,
# appending results to a report file named after the training set.
fout = open(training_set_filename + ".3.classifier.output", "w+")
for predict_class in class_labels:
    if "URL" in predict_class:
        continue
    # NOTE(review): this loop variable shadows the module-level `features`
    # list read from the features file; harmless here because X_*_all were
    # built before the loop, but easy to misread.
    for features in [baseline_features, selectivity_features, baseline_features+selectivity_features]:
        print "FEATURES: ", features
        print "PREDICTION CLASS: " + predict_class
        fout.write("----------------------------------------\n")
        fout.write("features: " + str(features) + "\n")
        fout.write("class: " + str(predict_class) + "\n")
        # Extract the chosen feature columns and target column, joined on url.
        X_train, y_train = get_data_set(X_train_all, y_train_all, features, predict_class)
        X_test, y_test = get_data_set(X_test_all, y_test_all, features, predict_class)
        #print X_train[:10]
        fout.write("some data points:\n")
        fout.write(str(X_train[:10]))
        #print X_train_all[:10] 
        # Standardize each feature column to zero mean / unit variance.
        # NOTE(review): train and test are scaled independently here; the
        # usual practice is to fit the scaler on train and apply it to test.
        X_train = preprocessing.scale(X_train)
        X_test = preprocessing.scale(X_test)

        if predict_class == "bitly_clicks" or predict_class == "num_posts":
            # Count-valued targets: bucket into the 4 ordinal classes.
            y_train = get_class_labels(y_train)
            y_test = get_class_labels(y_test)
        else:
            # Binary targets: binarize 0.0/1.0 labels; each transformed
            # row is indexed with y[0] to pull out the scalar label.
            lb = preprocessing.LabelBinarizer()
            lb.fit([0.0,1.0])
            y_train2 = []
            for y in lb.transform(y_train):
                y_train2.append(y[0])       
            y_train = y_train2
            y_test2 = []
            for y in lb.transform(y_test):
                y_test2.append(y[0])
            y_test = y_test2
        print "training", datetime.datetime.now()
        print "size of training and test sets" +"\t"+ str(len(y_train)) +"\t"+ str(len(y_test))
        fout.write("size of training and test sets" +"\t"+ str(len(y_train)) +"\t"+ str(len(y_test)) + "\n")
        fout.write("training" +"\t"+ str(datetime.datetime.now()) +"\n")
        if predict_class == "bitly_clicks" or predict_class == "num_posts":
            # Multi-class targets: one-vs-rest wrapper around the linear SVM.
            print "multi-class labelling"
            c = OneVsRestClassifier(svm.SVC(kernel='linear', cache_size=1000, probability=True))
            classifier = c.fit(X_train, y_train)
        else:
            classifier = svm.SVC(kernel='linear', cache_size=1000, probability=True).fit(X_train, y_train)
        fout.write(str(classifier) + "\n")
        print "testing", datetime.datetime.now()
        fout.write("testing" +"\t"+ str(datetime.datetime.now()) +"\n")
        y_pred = classifier.predict(X_test)
        probas_ = classifier.predict_proba(X_test)
        #y_pred = classifier.fit(X_train, y_train).predict(X_test)
        fout.write(str(datetime.datetime.now()) +"\n")
        fout.write("accuracy: " + str(accuracy_score(y_test, y_pred))+"\n")
        fout.write(classification_report(y_test, y_pred))
        # Compute confusion matrix 
        fout.write("computing confusion matrix" +"\n")
        cm = confusion_matrix(y_test, y_pred)
        fout.write("Confusion matrix: \n")
        fout.write(str(cm))
        fout.write("\n")
        
        if predict_class == "bitly_clicks" or predict_class == "num_posts":
            # don't print the AUC/ROC curves
            continue
 
        # Binary targets only from here on: plot and save the confusion matrix.
        pl.matshow(cm)
        pl.title('Confusion matrix')
        pl.colorbar()
        pl.ylabel('True label')
        pl.xlabel('Predicted label')
        #pl.show()
        
        if features == baseline_features:
            features_name = "baseline"
        elif features == selectivity_features:
            features_name = "selectivity"
        else:
            features_name = "baseline_and_selectivity"        
        # NOTE(review): str.strip(".txt") removes any of the characters
        # '.', 't', 'x' from both ends of the string — it does not remove
        # a ".txt" suffix (e.g. "text.txt".strip(".txt") == "e"). The
        # rebinding also affects filenames built on later iterations.
        training_set_filename = training_set_filename.strip(".txt")
        save("nytimes."+training_set_filename+".CM."+features_name+"."+predict_class, ext="pdf")

        # compute AUC
        fout.write("computing AUC" +"\n")
        print y_train[1:10]
        print y_test[1:10]
        print probas_[:10,]
        # Precision-recall curve from the positive-class probability column.
        precision, recall, thresholds = precision_recall_curve(y_test, probas_[:, 1])
        area = auc(recall, precision)
        fout.write("Area Under Curve: %0.2f\n" % area)
        #pl.clf()
        pl.plot(recall, precision, label='Precision-Recall curve')
        pl.xlabel('Recall')
        pl.ylabel('Precision')
        pl.ylim([0.0, 1.05])
        pl.xlim([0.0, 1.0])
        pl.title('Precision-Recall example: AUC=%0.2f' % area)
        pl.legend(loc="lower left")
        save("nytimes."+training_set_filename+".AUC."+features_name+"."+predict_class, ext="pdf")

        # compute ROC 
        fout.write("computing ROC" +"\n")
        fpr, tpr, thresholds = roc_curve(y_test, probas_[:, 1])
        roc_auc = auc(fpr, tpr)
        fout.write("Area under the ROC curve : %f\n" % roc_auc)
        pl.clf()
        pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
        pl.plot([0, 1], [0, 1], 'k--')
        pl.xlim([0.0, 1.0])
        pl.ylim([0.0, 1.0])
        pl.xlabel('False Positive Rate')
        pl.ylabel('True Positive Rate')
        pl.title('Receiver operating characteristic example')
        pl.legend(loc="lower right")
        #pl.show()
        save("nytimes."+training_set_filename+".ROC."+features_name+"."+predict_class, ext="pdf")
fout.close()

# Everything after this exit is dead code left over from an earlier version
# of the script; sys.exit() guarantees it never runs (and `y` is not bound
# at module level, so the next line would raise NameError anyway).
sys.exit()
y = get_class_labels(y)

# from http://scikit-learn.org/stable/auto_examples/plot_confusion_matrix.html#example-plot-confusion-matrix-py
# X = features should be an array of arrays of values
# Y = class should be a list of values