"""
This module lets you compare the statistical performance of two
machine learning algorithms A and B on several datasets.

Let trn_i and tst_i be the ith training and testing dataset for i = 1,2,...,N.
Let a_i = A( trn_i ) and b_i = B( trn_i ) be classifiers produced by the learning algorithms. 

Then, for each dataset and for both classifiers, we can collect the stream of errors 
err_a_i = a_i( tst_i.x ) != tst_i.y               # x, y and err_a_i are arrays of the same size
err_b_i = b_i( tst_i.x ) != tst_i.y               # x, y and err_b_i are arrays of the same size

Using countJointError, you can count the different outcomes of the joint streams of error.
k01_i, k10_i =  countJointError( err_a_i, err_b_i )

Next, it is possible to obtain the probability that a_i is better than b_i.
p_i = pBetterCorrClassifier( k01_i, k10_i )

Finally, you can obtain the probability that A is better than B:
p = pAvsB( [p_1, p_2, ... p_N ] )

"""

import numpy as np 
from scipy.stats import beta
from scipy.stats import t as student_t
from scipy.special import btdtr # cumulative beta distribution


# Tag naming the zero-one loss family.  Not referenced elsewhere in this
# chunk — presumably used by callers of this module (TODO confirm).
ZERO_ONE = 'zero-one'

def pBetterCorrClassifier(k01, k10, a01=1, a10=1):
    """
    Return the probability that the classifier 1 is better than the classifier 2.
    Both classifiers must be tested on the same sets.

    k01 : number of times the first classifier answered correctly while the second did not.
    k10 : the opposite count.
    a01, a10 : Beta prior pseudo-counts; should normally be left to their default values.

    Note : the answer doesn't depend on the total number of examples in the
    test set.  It only depends on the examples on which they disagree.

    >>> round(float(pBetterCorrClassifier(100, 95)), 11)
    0.63945756262

    """

    # sanity check (accepts scalar counts or arrays of counts)
    k01 = np.asarray( k01 )
    k10 = np.asarray( k10 )
    assert (k01 >= 0).all()
    assert (k10 >= 0).all()
    # Posterior on the probability that classifier 2 wins a disagreement is
    # Beta(k10 + a10, k01 + a01); its CDF at 1/2 is P(classifier 1 better).
    # beta.cdf is the regularized incomplete beta function — the same value
    # the deprecated scipy.special.btdtr returned (removed in SciPy 1.14).
    # (The original unreachable `return k01, k10` after this return was removed.)
    return beta.cdf( 0.5, k10 + a10, k01 + a01 )


class ComparePredictor_t:
    """
    Bayesian comparator for real-valued loss differences.

    Puts a normal-gamma prior on the (mean, precision) of the loss
    differences; calling the instance returns the posterior probability
    that the mean difference is below zero, i.e. that the first
    classifier is better.
    """

    def __init__(self, beta_0 = 0, mu_0 = 0, l_0 = 1, alpha_0 = 1 ):
        self.mu_0 = mu_0 # related to the initial estimation of the mean
        self.beta_0  = beta_0 # >= 0. Related to the initial estimation of the variance >= 0
        self.alpha_0 = alpha_0 # >=1. The number of pseudo-observations in beta_0 (alpha_0 >= 1)
        self.l_0 = l_0 # > 0. The number of pseudo-observations in mu_0 > 0

    def _get_posterior_param(self,dLossL):
        """Return (degrees of freedom, location, scale**2) of the posterior
        Student-t distribution over the mean loss difference."""
        m = len(dLossL)

        l_m = self.l_0 + m
        alpha_m = self.alpha_0 + m/2.

        avg = np.mean(dLossL)
        var = np.var(dLossL)
        # posterior mean: prior mean and sample mean, weighted by pseudo-counts
        mu_m = (self.l_0*self.mu_0 + m*avg)/ l_m

        beta_m = self.beta_0 + 0.5*m*var + (self.l_0*m*(avg-self.mu_0)**2)/(2.*l_m)

        return 2.*alpha_m, mu_m, beta_m / ( l_m * alpha_m )

    def __call__(self,dLossL):
        """Probability that the mean of dLossL is negative under the posterior."""
        nu, mu, var = self._get_posterior_param(dLossL)

        p = student_t.cdf(0, nu, loc=mu, scale=np.sqrt(var))

        # fix: Python 2 `print` statement was a SyntaxError on Python 3;
        # the call form below behaves identically on both versions.
        print('t : %.3g' % p)
        return p

def is_zero_one(dLossL):
    """Return True when every observed loss difference is -1, 0 or 1."""
    return {-1, 0, 1}.issuperset(np.unique(dLossL))

class ComparePredictor_zero_one:
    """
    Comparator for zero-one loss differences: only the sign of each
    observation matters, so the comparison reduces to
    pBetterCorrClassifier applied to the disagreement counts.
    """

    def __init__(self, a01=1, a10=1):
        # Beta prior pseudo-counts, forwarded to pBetterCorrClassifier.
        self.a01 = a01
        self.a10 = a10

    def __call__(self, dLossL):
        if not is_zero_one(dLossL):
            import sys
            sys.stderr.write(
                "***WARNING***: You are using a zero_one based estimator with non-zero-one observations." )

        tol = 1e-8
        n_second_wins = (dLossL > tol).sum()    # k10: first classifier lost the example
        n_first_wins = (dLossL < -tol).sum()    # k01: first classifier won the example

        return pBetterCorrClassifier(n_first_wins, n_second_wins, self.a01, self.a10)
 
def meta_estimator(dLossL):
    """Dispatch to the zero-one comparator when the observations look like
    zero-one loss differences, otherwise to the Student-t comparator."""
    if is_zero_one(dLossL):
        return ComparePredictor_zero_one()(dLossL)
    return ComparePredictor_t()(dLossL)
 
def poissonBinomial(pL):
    """
    The Poisson Binomial distribution is a generalization of the binomial
    distribution where the individual Bernoulli trials have different
    probabilities.

    This function computes the probability of observing k positive events
    for k = 0, 1, ..., N where N = len(pL) and pL corresponds to the list
    of the individual probabilities.  Returns an ndarray of length N + 1.

    >>> [round(q, 3) for q in poissonBinomial([0.3, 0.4, 0.1])]
    [0.378, 0.456, 0.154, 0.012]

    """
    # (fix: the original doctest displayed a Python list, but the function
    # returns an ndarray, so the example could never match.)

    pL = np.asarray(pL)
    assert np.all( (pL >= 0) & (pL <= 1) )

    # dynamic program: qL[k] = P(k successes among the trials seen so far)
    qL = np.zeros( len(pL) + 1 )
    qL[0] = 1

    for p in pL:
        # the new trial either fails (count stays at k) or
        # succeeds (count moves from k-1 to k)
        qL_ = (1-p)*qL
        qL_[1:] += p*qL[:-1]
        qL = qL_

    return qL



def pAvsB(pL):
    """
    Computes the probability that algorithm A is better than algorithm B.

    pL : the individual probabilities that the classifier produced by A is
    better than the classifier produced by B, one per dataset.
    """
    winCountDist = poissonBinomial(pL)      # P(A wins on exactly k datasets), k = 0..N
    n_outcomes = len(pL) + 1
    ks = np.arange(n_outcomes)
    # Beta CDF evaluated at 1/2 for each possible win count k
    tailV = beta.cdf(0.5, n_outcomes - ks, ks + 1)

    return np.dot(winCountDist, tailV)




def pbTest( diffLD, dLossEstimator = meta_estimator ):
    """
    diffLD is a dictionary of diffL where diffL is a list of test loss differences.
    diffL[i] = l( h, x_i, y_i ) - l( g, x_i, y_i )
    The keys of diffLD represent dataset names and they could be anything.

    Returns (pD, p): pD is a dictionary with the same keys as diffLD, each
    value being the probability that classifier h was better than
    classifier g on that dataset; p is the single probability that
    algorithm A is better than algorithm B.
    """
    pD = {}
    for key, diffL in diffLD.items():
        pD[key] = dLossEstimator(diffL)
    # fix: on Python 3, dict.values() is a view; np.asarray would wrap it
    # as a 0-d object array inside poissonBinomial and the validity check
    # there would raise — materialize it as a list first.
    return pD, pAvsB( list(pD.values()) )


def uBinBound(m, k, delta=0.1, uniformPrior=False):
    """Upper bound of the true risk given k errors out of m trials."""
    b_param = (m - k) + int(uniformPrior)
    return beta.ppf(1.0 - delta, k + 1, b_param)

def lBinBound(m, k, delta=0.1, uniformPrior=False):
    """Lower bound of the true risk given k errors out of m trials."""
    a_param = k + 1
    b_param = (m - k) + int(uniformPrior)
    return beta.isf(1.0 - delta, a_param, b_param)
  

def binBound(m, k, delta=0.1, uniformPrior=False):
    """High level call returning the (lower, upper) bound pair."""
    # Input validation; AssertionError is kept so existing callers see
    # the same exception type as before.
    for count in (m, k):
        assert abs(int(count) - count) < 1e-8   # counts must be (near-)integers
    assert 0 < delta <= 0.5
    assert m >= k
    assert m > 0
    assert k >= 0

    lower = lBinBound(m, k, delta, uniformPrior)
    upper = uBinBound(m, k, delta, uniformPrior)
    return lower, upper
