'''
Created on Aug 15, 2014

This module contains the supervised methods (i.e. learners) used with link prediction. 

@author: rockyrock
'''
import testing_utils as tstu
import numpy as np
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
import light_srw, srw
from scipy import interp
from sklearn.metrics import roc_curve, auc, roc_auc_score
import matrix_fact


def logistic(X, Y, test_name, random_state=0):
    """
    Train a logistic regression classifier and evaluate it with cross-validation.

    Parameters:
    ------------
    X: training dataset.
    Y: labels. (binary)
    test_name: the string that will be used next to the roc curve in the plot.
    random_state: seed forwarded to the cross-validation routine.

    Returns:
    --------
    roc: the details of a roc curve to be plotted
    """
    # Named `clf` instead of shadowing the function name.
    clf = linear_model.LogisticRegression()
    return tstu.train_with_cross_val(X, Y, clf, test_name, random_state)

def random_forest(X, Y, test_name, options, random_state=0):
    """
    Train a random forest classifier and evaluate it with cross-validation.

    Parameters:
    ------------
    X: training dataset.
    Y: labels. (binary)
    test_name: the string that will be used next to the roc curve in the plot.
    options: a dictionary that holds parameter values.
            For this method we need: options['RF_n_estimators'].
    random_state: seed forwarded to the cross-validation routine.

    Returns:
    --------
    roc: the details of a roc curve to be plotted
    """
    # n_jobs=-1 uses all available cores for tree construction.
    clf = RandomForestClassifier(n_estimators=options['RF_n_estimators'],
                                 n_jobs=-1)
    return tstu.train_with_cross_val(X, Y, clf, test_name, random_state)

def rbm_logistic(X, Y, test_name, options, random_state=0):
    """
    Train a stack of Restricted Boltzmann Machines feeding a logistic
    regression classifier, evaluated with cross-validation.

    Parameters:
    ------------
    X: training dataset.
    Y: labels. (binary)
    test_name: the string that will be used next to the roc curve in the plot.
    options: a dictionary that holds parameter values.
            For this method we need: options['RBM_n_components'],
                                    options['RBM_n_iter'], options['n_RBMs'].
    random_state: seed passed to each RBM and to the cross-validation routine.

    Returns:
    --------
    roc: the details of a roc curve to be plotted
    """
    n_components = options['RBM_n_components']
    n_iter = options['RBM_n_iter']
    n_RBMs = options['n_RBMs']

    steps = []

    for i in range(n_RBMs):
        # BUGFIX: n_iter was previously passed positionally *after* keyword
        # arguments, which is a SyntaxError — it must be a keyword argument.
        rbm = BernoulliRBM(random_state=random_state, verbose=True,
                           n_components=n_components, n_iter=n_iter)
        steps.append(("rbm" + str(i), rbm))

    steps.append(('logistic', linear_model.LogisticRegression()))
    rbm_logistic_clf = Pipeline(steps=steps)

    roc = tstu.train_with_cross_val(X, Y, rbm_logistic_clf, test_name, random_state)

    return roc

def rbm_rf(X, Y, test_name, options, random_state=0):
    """
    Train a stack of Restricted Boltzmann Machines feeding a Random Forest
    classifier, evaluated with cross-validation.

    Parameters:
    ------------
    X: training dataset.
    Y: labels. (binary)
    test_name: the string that will be used next to the roc curve in the plot.
    options: a dictionary that holds parameter values.
            For this method we need: options['RBM_n_components'],
                                    options['RBM_n_iter'], options['n_RBMs'],
                                    options['RF_n_estimators'].
    random_state: seed passed to each RBM and to the cross-validation routine.

    Returns:
    --------
    roc: the details of a roc curve to be plotted
    """
    n_components = options['RBM_n_components']
    n_iter = options['RBM_n_iter']
    n_RBMs = options['n_RBMs']
    n_estimators = options['RF_n_estimators']

    steps = []

    for i in range(n_RBMs):
        # BUGFIX: n_iter was previously passed positionally *after* keyword
        # arguments, which is a SyntaxError — it must be a keyword argument.
        rbm = BernoulliRBM(random_state=random_state, verbose=True,
                           n_components=n_components, n_iter=n_iter)
        steps.append(("rbm" + str(i), rbm))

    steps.append(('random_forest',
                  RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)))
    # Renamed from `rbm_logistic_clf` (copy-paste leftover) — this pipeline
    # ends in a random forest, not logistic regression.
    rbm_rf_clf = Pipeline(steps=steps)

    roc = tstu.train_with_cross_val(X, Y, rbm_rf_clf, test_name, random_state)

    return roc



#################
"""
A wrapper for supervised random walk
"""

def train_with_srw(G, X, test_name,  k=10, delta=5, alpha=0.5, iter=1):
    """
    Applies the Supervised Random Walk method.

    Parameters:
    ------------
    G: networkx graph.
    X: node attributes matrix. Each row i is the features vector for node i.
    test_name: the name to be plotted next to the curve.
    k: the number of neighbours a node must have in order to be considered a candidate source node.
    delta: the number of edges the candidate source node made in the future that close a triangle.
           (i.e. the future/destination node is a friend of a friend, so delta is a threshold that sees how many
             of these future/destination nodes are friends of current friends). A candidate source node
             that has a degree above k, and made future friends above delta, then becomes a source node
             for training.
    alpha: restart probability.
    iter: gradient descent epochs.

    Returns:
    --------
    roc: a roc curve details to be plotted.
    mean_auc1: an AUC calculated using the same way of calculating the AUC with cross-validation
               (area under the mean ROC curve over all testing source nodes).
    mean_auc2: an AUC calculated as the mean of the per-source-node AUCs.
    """
    psi = light_srw.GeneralPSI(G=G, X=X, k=k, delta=delta)
    srw_obj = light_srw.SRW(psi=psi, alpha=alpha)
    srw_obj.optimize(iter=iter)

    aucs = []
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)

    testing_sources = psi.get_testingS()

    for s in testing_sources:
        P = srw_obj.get_P(s)
        s_index = psi.get_s_index(s)
        probs = srw.rwr(P, s_index, alpha)
        y_test = light_srw.get_y_test(s, psi)

        fpr, tpr, thresholds = roc_curve(y_test, probs)
        # np.interp replaces the deprecated scipy.interp alias (same function):
        # resample each per-source ROC onto a common fpr grid before averaging.
        mean_tpr += np.interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0

        # BUGFIX: this value was previously stored in a local named `auc`,
        # shadowing sklearn's auc() function that is called after the loop,
        # which raised "TypeError: 'float' object is not callable".
        aucs.append(tstu.compute_AUC(y_test, probs))

    mean_tpr /= len(testing_sources)
    mean_tpr[-1] = 1.0
    mean_auc1 = auc(mean_fpr, mean_tpr)

    roc = (test_name, mean_fpr, mean_tpr, mean_auc1)

    # Mean of the per-source-node AUCs.
    mean_auc2 = np.mean(np.array(aucs))

    return roc, mean_auc1, mean_auc2




################
"""
A wrapper for matrix factorization
"""

def train_matrix_fact_normal(G, X, test_name, options):
    """
    Train the normal matrix factorization model.

    Parameters:
    ------------
    G: networkx object.
    X: node attributes matrix.
    test_name: the name of the roc curve.
    options: a dictionary that holds the following parameters:
            options['mf_n_latent_feats'], options['mf_n_folds'],
            options['mf_alpha'], options['mf_n_iter'], options['mf_with_sampling']

    Returns:
    --------
    roc: a roc curve details to be plotted.
    """
    mf_model = matrix_fact.Matrix_Factorization(
        k=options['mf_n_latent_feats'], G=G, X=X)

    # `auc_value` avoids shadowing the auc() function imported at module level.
    fpr, tpr, auc_value = mf_model.train_test_normal_model(
        n_folds=options['mf_n_folds'],
        alpha=options['mf_alpha'],
        n_iter=options['mf_n_iter'],
        with_sampling=options['mf_with_sampling'])

    return (test_name, fpr, tpr, auc_value)


def train_matrix_fact_ranking(G, X, test_name, options):
    """
    Train the ranking matrix factorization model.

    Parameters:
    ------------
    G: networkx object.
    X: node attributes matrix.
    test_name: the name of the roc curve.
    options: a dictionary that holds the following parameters:
            options['mf_n_latent_feats'], options['mf_n_folds'],
            options['mf_alpha'], options['mf_n_iter']

    Returns:
    --------
    roc: a roc curve details to be plotted.
    """
    mf_model = matrix_fact.Matrix_Factorization(
        k=options['mf_n_latent_feats'], G=G, X=X)

    # `auc_value` avoids shadowing the auc() function imported at module level.
    fpr, tpr, auc_value = mf_model.train_test_ranking_model(
        n_folds=options['mf_n_folds'],
        alpha=options['mf_alpha'],
        n_iter=options['mf_n_iter'])

    return (test_name, fpr, tpr, auc_value)













