from rakg.graph_utils import *
from rakg.timer import *
import rakg.graphsim as gsim
import numpy as np
import networkx as nx
from itertools import izip
import sys
from memory_profiler import profile
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn import cross_validation
import rakg.facebook100_parser as fb_parser
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import roc_curve, auc, roc_auc_score
import matplotlib
matplotlib.use('Agg') 
import pylab as pl
import random
import sys
from scipy import interp
from sklearn.cross_validation import StratifiedKFold
from sklearn import linear_model
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline


def main():
    """Entry point placeholder; the real work lives in the helpers below."""
    return None
    
def draw_rocs(values_list, file_name):
    """
    Render a family of ROC curves on one plot and write it out as a PNG.

    Parameters
    ----------
    :type values_list: python list
    :param values_list: details of every ROC curve to paint, as tuples
                        [(label1, fpr1, tpr1, roc_auc1),
                         (label2, fpr2, tpr2, roc_auc2), ...etc]

    :param file_name: the name of the PNG picture (you can prepend the path as well)

    """
    pl.clf()

    for curve_label, curve_fpr, curve_tpr, curve_auc in values_list:
        pl.plot(curve_fpr, curve_tpr, lw=1,
                label='%s (auc = %0.2f)' % (curve_label, curve_auc))

    # Diagonal reference line: the ROC of a random classifier.
    pl.plot([0, 1], [0, 1], 'k--')
    pl.xlim([-0.05, 1.05])
    pl.ylim([-0.05, 1.05])
    pl.xlabel('False Positive Rate')
    pl.ylabel('True Positive Rate')
    pl.title('Receiver operating characteristic')
    pl.legend(loc="lower right")
    pl.savefig(file_name, dpi=72)
    
def prepare_training_set(file_path, file_parser, features_adder, random_state,
                         edge_removal_perc=0.5, enabled_features="A"):
    """
    Build a shuffled training set (X, y) for link prediction.

    Parses a graph file, removes a fraction of its edges from a working
    copy, then computes features for every candidate node pair; the labels
    Y come from the original (complete) graph.

    :param file_path: path to the graph file understood by ``file_parser``.
    :param file_parser: object exposing ``parse(file_path)`` and
                        ``add_nodes_features(...)``.
    :param features_adder: callable that appends topological features,
                           e.g. ``standard_features_adder``.
    :param random_state: seed forwarded to ``sklearn.utils.shuffle``.
    :param edge_removal_perc: fraction of edges removed before computing
                              topological features (default 0.5).
    :param enabled_features: "A" = topological features only,
                             "B" = node-attribute features only,
                             "C" = both; any other value aborts the program.
    :return: tuple (X, y) of shuffled feature rows and labels.
    """
    G, Nodes_X = file_parser.parse(file_path)
    U = build_U(G.nodes())
    Y = build_Y(G, U)

    # Work on a copy so the original graph (source of the labels Y) is kept intact.
    Gx = G.copy()
    num_edges_to_remove = int(edge_removal_perc * Gx.number_of_edges())
    removed_edges = random.sample(Gx.edges(), num_edges_to_remove)
    Gx.remove_edges_from(removed_edges)

    degrees = gsim.get_degrees_list(Gx)
    A = nx.adj_matrix(Gx)
    A = np.asarray(A)
    data = None

    if enabled_features == "A":
        data = features_adder(data, A, U, degrees, G.nodes())
    elif enabled_features == "B":
        data = file_parser.add_nodes_features(Nodes_X, data, U, G.nodes())
    elif enabled_features == "C":
        data = features_adder(data, A, U, degrees, G.nodes())
        data = file_parser.add_nodes_features(Nodes_X, data, U, G.nodes())
    else:
        # Bug fix: a bare sys.exit() terminates with status 0 (success) and
        # no diagnostic; exit with a message and a non-zero status instead.
        sys.exit("prepare_training_set: unknown enabled_features value %r"
                 % (enabled_features,))

    X, y = shuffle(data, Y, random_state=random_state)

    return X, y

def standard_features_adder(data, A, U, degrees, nodes):
    """
    Append the standard battery of topological similarity features.

    Each gsim index is computed on adjacency matrix A (some also need the
    degree list) and added as one feature column per node pair in U.
    """
    # Indices that only need the adjacency matrix, in the canonical order.
    adjacency_only = (gsim.cn, gsim.lp)
    # Indices that additionally need the node degree list.
    degree_based = (gsim.salton, gsim.jacard, gsim.sorensen, gsim.hpi,
                    gsim.hdi, gsim.lhn1, gsim.pa, gsim.ra)

    for index_fn in adjacency_only:
        data = add_feature(data, U, index_fn(A), nodes)
    for index_fn in degree_based:
        data = add_feature(data, U, index_fn(A, degrees), nodes)
    return data

def train_with_cross_val(X, Y, clf, test_name, n_folds=10):
    """
    Evaluate ``clf`` with k-fold cross validation and average the ROC curves.

    :return: tuple (test_name, mean_fpr, mean_tpr, mean_auc) in the shape
             expected by ``draw_rocs``.
    """
    folds = cross_validation.KFold(len(X), n_folds=n_folds)
    mean_fpr = np.linspace(0, 1, 100)
    mean_tpr = 0.0

    for train_idx, test_idx in folds:
        fitted = clf.fit(X[train_idx], Y[train_idx])
        probabilities = fitted.predict_proba(X[test_idx])
        fpr, tpr, _ = roc_curve(Y[test_idx], probabilities[:, 1])
        # Interpolate this fold's curve onto the common FPR grid, pin the
        # origin at (0, 0), and accumulate for averaging across folds.
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0

    mean_tpr /= len(folds)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)

    return (test_name, mean_fpr, mean_tpr, mean_auc)

def train_with_holdout(X, Y, clf, test_name, random_state, test_size=0.33):
    """
    Evaluate ``clf`` on a single train/test split and return its ROC curve.

    :return: tuple (test_name, fpr, tpr, roc_auc) in the shape expected by
             ``draw_rocs``.
    """
    split = cross_validation.train_test_split(
        X, Y, test_size=test_size, random_state=random_state)
    X_train, X_test, y_train, y_test = split

    fitted = clf.fit(X_train, y_train)
    positive_scores = fitted.predict_proba(X_test)[:, 1]
    fpr, tpr, _ = roc_curve(y_test, positive_scores)
    roc_auc = auc(fpr, tpr)

    return (test_name, fpr, tpr, roc_auc)


# Bug fix: guard the entry point so importing this module does not run it.
if __name__ == "__main__":
    main()
























