import math
import random
from collections import Counter

import numpy as np
import scipy.special as special

def print_function(x):
    """Emit *x* on stdout (thin logging-style wrapper around ``print``)."""
    print(x)
    
def print_warning(x):
    """Emit warning message *x* on stdout (same behavior as ``print``)."""
    print(x)
    
def cal_entroy(x):
    '''
    Compute the Shannon entropy (base 2) of the values in x.

    Parameters
    ----------
    x : array-like
        The list value of the variant.

    Returns
    -------
    entroy : float
        The Shannon entropy value in bits; 0.0 for empty input.
    '''
    # NOTE: the original body called np.log2 but the file never imported
    # numpy, raising NameError; math.log2 removes that dependency.
    length = len(x)
    if length == 0:
        # No observations carry no information.
        return 0.0
    entroy = 0.0
    for count in Counter(x).values():
        # count >= 1 guarantees p > 0, so log2(p) is always defined and
        # the original's `if p != 0` guard is unnecessary.
        p = count / float(length)
        entroy -= p * math.log2(p)
    return entroy

def cal_info_gain_rate(x, y):
    '''
    Compute the information gain ratio of feature x with respect to target y.

    IGR = (H(y) - H(y|x)) / H(x), returning 0.0 when H(x) == 0 (a constant
    feature) to avoid division by zero.

    Parameters
    ----------
    x : array-like
        The feature variant list.
    y : array-like
        The target variant list (same length as x — assumed, not checked).

    Returns
    -------
    igr : float
        The info gain rate value.
    '''
    x_entropy = cal_entroy(x)
    if x_entropy == 0.0:
        # Constant feature: no split information, gain ratio defined as 0.
        return 0.0
    length = len(x)
    # Group the target values by feature value (single dict lookup per item,
    # replacing the original get-then-get double lookup).
    value2list = {}
    for xv, yv in zip(x, y):
        value2list.setdefault(xv, []).append(yv)
    # Conditional entropy H(y|x): weighted entropy of each feature bucket.
    cond_entropy = 0.0
    for ys in value2list.values():
        cond_entropy += len(ys) / float(length) * cal_entroy(ys)
    return (cal_entroy(y) - cond_entropy) / x_entropy

class BayesianSmoothing(object):
    """Estimate Beta(alpha, beta) prior parameters for click-through rates
    via fixed-point iteration on digamma sums."""

    def __init__(self, alpha, beta):
        # Current estimates of the Beta prior parameters.
        self.alpha = alpha
        self.beta = beta

    def sample(self, alpha, beta, num, imp_upperbound):
        """Draw `num` synthetic (impressions, clicks) pairs.

        Click-through rates are drawn from Beta(alpha, beta); each impression
        count is fixed at `imp_upperbound`, and clicks are the (fractional)
        product imp * rate.

        Returns
        -------
        (list, list)
            Impression counts and click counts.
        """
        # NOTE: the original referenced `numpy`/`random` without importing
        # them (NameError), and drew a random impression count only to
        # overwrite it on the next line; the dead draw is removed, keeping
        # the effective behavior (imp == imp_upperbound).
        rates = np.random.beta(alpha, beta, num)
        I = []
        C = []
        for clk_rt in rates:
            imp = imp_upperbound
            I.append(imp)
            C.append(imp * clk_rt)
        return I, C

    def update(self, imps, clks, iter_num, epsilon):
        """Run up to `iter_num` fixed-point iterations, stopping early once
        both parameters move by less than `epsilon`."""
        for _ in range(iter_num):
            new_alpha, new_beta = self.__fixed_point_iteration(imps, clks, self.alpha, self.beta)
            # Converged: the step changed nothing meaningful, so keep the
            # current estimates and stop.
            if abs(new_alpha - self.alpha) < epsilon and abs(new_beta - self.beta) < epsilon:
                break
            self.alpha = new_alpha
            self.beta = new_beta

    def __fixed_point_iteration(self, imps, clks, alpha, beta):
        """One fixed-point step; returns the updated (alpha, beta).

        Assumes at least one pair with imps[i] > 0 so the shared
        denominator is non-zero — TODO confirm callers guarantee this.
        """
        numerator_alpha = 0.0
        numerator_beta = 0.0
        denominator = 0.0

        for imp, clk in zip(imps, clks):
            numerator_alpha += special.digamma(clk + alpha) - special.digamma(alpha)
            numerator_beta += special.digamma(imp - clk + beta) - special.digamma(beta)
            denominator += special.digamma(imp + alpha + beta) - special.digamma(alpha + beta)

        return alpha * (numerator_alpha / denominator), beta * (numerator_beta / denominator)