import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))

import networkx as nx
import numpy as np
from rakg import graphsim, graph_utils,\
     light_srw, matrix_fact, supervised_methods, \
     testing_utils, timer, srw
from scipy import interp
from sklearn.metrics import roc_curve, auc, roc_auc_score


def main():
    """
    Script entry point: dispatch to a test routine based on CLI arguments.

    Expected argv: <choice:int> <dataset_name:str>

    NOTE(review): this function looks incomplete --
    `test_unsupervised_methods_1` requires (G, U, Y, node_to_index,
    options) but is called here with no arguments, so choice == 1 will
    raise a TypeError at runtime.  The dataset-loading step described by
    the comment below is not implemented, `dataset_name` is never used,
    and no `if __name__ == "__main__": main()` guard is visible in this
    chunk -- confirm against the full file.
    """
    args = sys.argv
    choice = int(args[1])
    dataset_name = args[2]
    
    #check which dataset to load, then get G, Y, U
    
    if choice == 1:
        test_unsupervised_methods_1()

    
def test_unsupervised_methods_1(G, U, Y, node_to_index, options):
    """
    
    Do link prediction with local un-supervised methods and print the auc for each
    un-supervised method.
    
    Parameters:
    -----------
    G: networkx graph object.
    U: a list that holds dyads for testing.
    Y: the true labels for the dyads.
    node_to_index: a dictionary that maps from a node name to a node index.
    options: is a dictionary that holds parameters values. 
            Here we only need: options['katz_h'].
    
    Returns:
    --------
    It doesn't return anything, but only prints the auc scores for each method
    to the output.
    """
    t = timer.Timerx()
    
    degrees = graphsim.get_degrees_list(G)
    katz_h = options['katz_h']
    A = np.array( nx.adj_matrix(G) )
    
    print "Computing the auc of the simple un-supervised methods...\n"
    
    t.start()
    cn_p = graphsim.predict_scores(U, graphsim.cn(A), node_to_index)
    print "CN: ", t.stop(), "\n"

    t.start()
    salton_p = graphsim.predict_scores(U, graphsim.salton(A, degrees), node_to_index)
    print "Salton: ", t.stop(), "\n"
    
    t.start()
    jacard_p = graphsim.predict_scores(U, graphsim.jacard(A, degrees), node_to_index)
    print "Jacard: ", t.stop(), "\n"
    
    t.start()
    sorensen_p = graphsim.predict_scores(U, graphsim.sorensen(A, degrees), node_to_index)
    print "Sorensen: ", t.stop(), "\n"
    
    t.start()
    hpi_p = graphsim.predict_scores(U, graphsim.hpi(A, degrees), node_to_index)
    print "HPI: ", t.stop(), "\n"
    
    t.start()
    hdi_p = graphsim.predict_scores(U, graphsim.hdi(A, degrees), node_to_index)
    print "HDI: ", t.stop(), "\n"
    
    t.start()
    lhn1_p = graphsim.predict_scores(U, graphsim.lhn1(A, degrees), node_to_index)
    print "LHN1: ", t.stop(), "\n"
    
    t.start()
    pa_p = graphsim.predict_scores(U, graphsim.pa(A, degrees), node_to_index)
    print "PA: ", t.stop(), "\n"
    
    t.start()    
    aa_p = graphsim.predict_scores(U, graphsim.aa(A, degrees), node_to_index)
    print "AA: ", t.stop(), "\n"
    
    t.start()
    ra_p = graphsim.predict_scores(U, graphsim.ra(A, degrees), node_to_index)
    print "RA: ", t.stop(), "\n"
    
    t.start()
    lp_p = graphsim.predict_scores(U, graphsim.lp(A, h=katz_h), node_to_index)
    print "LP: ", t.stop(), "\n"
    
    print 'Done.\n\n'
    
    print "\nAUCs:\n"
    
    cn_auc = testing_utils.compute_AUC(Y, cn_p)
    print "CN: ", cn_auc, "\n"
    salton_auc =  testing_utils.compute_AUC(Y, salton_p)
    print "Salton: ", salton_auc, "\n"
    jacard_auc = testing_utils.compute_AUC(Y, jacard_p)
    print "Jacard: ", jacard_auc, "\n"
    sorensen_auc = testing_utils.compute_AUC(Y, sorensen_p)
    print "Sorensen: ", sorensen_auc, "\n"
    hpi_auc = testing_utils.compute_AUC(Y, hpi_p)
    print "HPI: ", hpi_auc, "\n"
    hdi_auc = testing_utils.compute_AUC(Y, hdi_p)
    print "HDI: ", hdi_auc, "\n"
    lhn1_auc = testing_utils.compute_AUC(Y, lhn1_p)
    print "LHN1: ", lhn1_auc, "\n"
    pa_auc = testing_utils.compute_AUC(Y, pa_p)
    print "PA: ", pa_auc, "\n"
    aa_auc = testing_utils.compute_AUC(Y, aa_p)
    print "AA: ", aa_auc, "\n"
    ra_auc = testing_utils.compute_AUC(Y, ra_p)
    print "RA: ", ra_auc, "\n"
    lp_auc = testing_utils.compute_AUC(Y, lp_p)
    print "LP: ", lp_auc, "\n"
    
    print 'The End.'
    
    
def test_unsupervised_methods_2(G, U, Y, node_to_index, options):
    """
    
    Do link prediction with global un-supervised methods and print the auc for each
    un-supervised method.
    
    Parameters:
    -----------
    G: networkx graph object.
    U: a list that holds dyads for testing.
    Y: the true labels for the dyads.
    node_to_index: a dictionary that maps from a node name to a node index.
    options: is a dictionary that holds parameters values. 
            Here we only need: options['rwr_alpha'] (restart probability
            for random walk with restart) and options['lrw_nSteps'] (walk
            length for local random walk).  Note: the Katz damping factor
            is NOT read from options; it is computed below via
            graphsim.katz_h(A).
    
    Returns:
    --------
    It doesn't return anything, but only prints the auc scores for each method
    to the output.
    """
    t = timer.Timerx()
    rwr_alpha = options['rwr_alpha']
    lrw_nSteps = options['lrw_nSteps']
    # Dense adjacency matrix of G, shared by all methods below.
    A = np.array( nx.adj_matrix(G) )
    
    print "Computing the auc of the global un-supervised methods...\n"
    
    t.start()
    # Katz similarity with an automatically chosen damping factor.
    katz_h = graphsim.katz_h(A)
    katz_p = graphsim.predict_scores(U, graphsim.katz(A, katz_h), node_to_index)
    print "Katz: ", t.stop(), "\n"
    
    t.start()
    # Random walk with restart.
    rwr_p = graphsim.RWR_Clf(A, rwr_alpha).score(U, node_to_index) 
    print "RWR: ", t.stop(), "\n"
    
    t.start()
    # Local random walk, normalized by the edge count.
    lrw_p = graphsim.LRW_Clf(A, lrw_nSteps, G.number_of_edges()).score(U, node_to_index)
    print "LRW: ", t.stop(), "\n"
     
    t.start()
    # NOTE(review): SRW reuses lrw_nSteps as its step count -- presumably
    # intentional (superposed walk of the same length); confirm.
    srw_p = graphsim.SRW_Clf(A, lrw_nSteps, G.number_of_edges()).score(U, node_to_index)
    print "SRW: ", t.stop(), "\n"
    
    print 'Done.\n\n'
    
    print "\nAUCs:\n"
    
    katz_auc = testing_utils.compute_AUC(Y, katz_p)
    print "Katz: ", katz_auc, "\n"
    rwr_auc = testing_utils.compute_AUC(Y, rwr_p)
    print "RWR: ", rwr_auc, "\n"
    lrw_auc = testing_utils.compute_AUC(Y, lrw_p)
    print "LRW: ", lrw_auc, "\n"
    srw_auc = testing_utils.compute_AUC(Y, srw_p)
    print "SRW: ", srw_auc, "\n"
    
    print "The end.\n"
    
    
    
def test_supervised_methods_1(Xs, Ys, tests_names, plot_file_name):
    """
    
    Train a logistic regression classifier using the set of datasets.
    It outputs a plot for the aucs.
    
    Parameters:
    -----------
    Xs: a list of training datasets.
    Ys: a list of labeling vectors.
    tests_names: a list that contains the names of each test with respect
                 to each dataset in Xs (i.e. the auc curve name).
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    """
    rocs = []
    t = timer.Timerx()
    
    for X, Y, test_name in zip(Xs, Ys, tests_names):
        t.start()
        roc = supervised_methods.logistic(X, Y, test_name)
        print "Test name: %s, time: %s, auc: %f\n" % (test_name, t.stop(), roc[3])
        rocs.append(roc)
        
    testing_utils.draw_rocs(rocs, plot_file_name)
    
    
def test_supervised_methods_2(Xs, Ys, tests_names, plot_file_name, options):
    """
    
    Train a random forest classifier using the set of datasets.
    It outputs a plot for the aucs.
    
    Parameters:
    -----------
    Xs: a list of training datasets.
    Ys: a list of labeling vectors.
    tests_names: a list that contains the names of each test with respect
                 to each dataset in Xs (i.e. the auc curve name).
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    options: a dictionary that holds needed parameters. 
            Here we need: options['RF_n_estimators']
    """
    
    rocs = []
    t = timer.Timerx()
    
    for X, Y, test_name in zip(Xs, Ys, tests_names):
        t.start()
        roc = supervised_methods.random_forest(X, Y, test_name, options)
        print "Test name: %s, time: %s, auc: %f\n" % (test_name, t.stop(), roc[3])
        rocs.append(roc)
        
    testing_utils.draw_rocs(rocs, plot_file_name)
    
    
def test_supervised_methods_3(Xs, Ys, tests_names, plot_file_name, options):
    """
    Train a Restricted Boltzmann Machine + Logistic regression classifier using the set of datasets.
    It outputs a plot for the aucs.
    
    Parameters:
    -----------
    Xs: a list of training datasets.
    Ys: a list of labeling vectors.
    tests_names: a list that contains the names of each test with respect
                 to each dataset in Xs (i.e. the auc curve name).
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    options: a dictionary that holds needed parameters. 
            Here we need: options['RBM_n_components'], options['RBM_n_iter'],
                        options['n_RBMs']
    """
    t = timer.Timerx()
    rocs = []
    
    for X, Y, test_name in zip(Xs, Ys, tests_names):
        t.start()
        roc = supervised_methods.rbm_logistic(X, Y, test_name, options)
        print "Test name: %s, time: %s, auc: %f\n" % (test_name, t.stop(), roc[3])
        rocs.append(roc)
        
    testing_utils.draw_rocs(rocs, plot_file_name)
    
def test_supervised_methods_4(Xs, Ys, tests_names, plot_file_name, options):
    """
    Train a Restricted Boltzmann Machine + Random Forest classifier using the set of datasets.
    It outputs a plot for the aucs.
    
    Parameters:
    -----------
    Xs: a list of training datasets.
    Ys: a list of labeling vectors.
    tests_names: a list that contains the names of each test with respect
                 to each dataset in Xs (i.e. the auc curve name).
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    options: a dictionary that holds needed parameters. 
            Here we need: options['RBM_n_components'], options['RBM_n_iter'],
                        options['n_RBMs'], options['RF_n_estimators']
    """
    
    rocs = []
    t = timer.Timerx()
    
    for X, Y, test_name in zip(Xs, Ys, tests_names):
        t.start()
        roc = supervised_methods.rbm_rf(X, Y, test_name, options)
        print "Test name: %s, time: %s, auc: %f\n" % (test_name, t.stop(), roc[3])
        rocs.append(roc)
        
    testing_utils.draw_rocs(rocs, plot_file_name)
    
    
def test_supervised_random_walk(G, X, plot_file_name, options_list):
    """
    Applies the Supervised Random Walk method and outputs a plot for the aucs.
    
    Parameters:
    ------------
    G: networkx graph.
    X: node attributes matrix. Each row i is the features vector for node i.
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    
    options_list: a list that contains dictionaries that holds parameters values. 
                  Each dictionary represents a complete test case. All the curves
                  of the test cases will be joined in one plot.
            Example:
            --------
            options1["srw_test_name"], options1["srw_k"], options1["srw_delta"],
            options1["srw_alpha"], options1["srw_iter"].
            options_list = [options1, options2, ..etc]
            
            The parameters in each dictionary means:
            ----------------------------------------
            k: the number of neighbours a node must have in order to be considered a candidate source node.
            delta: the number of edges the candidate source node made in the future that close a triangle. 
                   (i.e. the future/destination node is a friend of a friend, so delta is a threshold that sees how many
                     of these future/destination nodes are friends of current friends). A candidate source node
                     that has a degree above k, and made future friends above delta, then becomes a source node
                     for training.
            alpha: restart probability.
            iter: gradient desecent epochs.
    """
    
    
    rocs = []
    t = timer.Timerx()
    
    for options in options_list:
        test_name = options["srw_test_name"]
        k = options["srw_k"]
        delta = options["srw_delta"]
        alpha = options["srw_alpha"]
        iter = options["srw_iter"]
        t.start()
        roc, typical_auc, my_auc = supervised_methods.train_with_srw(G, X, test_name, k, delta, alpha, iter)
        print "Time: ", t.stop(), "\n\n"
        print "typical auc: ", typical_auc, "\n"
        print "my auc: ", my_auc, "\n"
        print "============"
        rocs.append(roc)
        
    
    testing_utils.draw_rocs(rocs, plot_file_name) 
    
    
def test_matrix_fact_normal(G, X, plot_file_name, options_list):  
    """
    Applies the normal factorization model and outputs a plot for the aucs.
    
    Parameters:
    -----------
    G: networkx graph.
    X: node attributes matrix. Each row i is the features vector for node i.
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    options_list: a list that contains dictionaries that holds parameters values. 
                  Each dictionary represents a complete test case. All the curves
                  of the test cases will be joined in one plot. Each dictionary must 
                  have the following parameters: options['mf_test_name']
                  options['mf_n_latent_feats'], options['mf_n_folds'], 
                  options['mf_alpha'], options['mf_n_iter'], options['mf_with_sampling']
    """
    t = timer.Timerx()
    rocs = []
    
    for options in options_list:
        test_name = options['mf_test_name']
        t.start()
        roc = supervised_methods.train_matrix_fact_normal(G, X, test_name, options)
        print "Test name: %s, time: %s, auc: %f\n" % (test_name, t.stop(), roc[3])
        rocs.append(roc)
    
    testing_utils.draw_rocs(rocs, plot_file_name) 
    
    
def test_matrix_fact_ranking(G, X, plot_file_name, options_list):  
    """
    Applies the ranking factorization model and outputs a plot for the aucs.
    
    Parameters:
    -----------
    G: networkx graph.
    X: node attributes matrix. Each row i is the features vector for node i.
    plot_file_name: the name of the figure to be saved. (you can prepend the path as well).
    options_list: a list that contains dictionaries that holds parameters values. 
                  Each dictionary represents a complete test case. All the curves
                  of the test cases will be joined in one plot. Each dictionary must 
                  have the following parameters: options['mf_test_name']
                  options['mf_n_latent_feats'], options['mf_n_folds'], 
                  options['mf_alpha'], options['mf_n_iter']
    """
    t = timer.Timerx()
    rocs = []
    
    for options in options_list:
        test_name = options['mf_test_name']
        t.start()
        roc = supervised_methods.train_matrix_fact_ranking(G, X, test_name, options)
        print "Test name: %s, time: %s, auc: %f\n" % (test_name, t.stop(), roc[3])
        rocs.append(roc)
    
    testing_utils.draw_rocs(rocs, plot_file_name)     
    
    
    
    
    
    
    
    
    