import time
import random
import functools
import math
import logging
import numpy as np

from operator import add
import scipy.stats.distributions


"""
TODO: Documentation
"""

def dotproduct(v1, v2):
    """Return the dot product of two equal-length numeric sequences."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total

def length(v):
    """Return the Euclidean (L2) norm of vector ``v``."""
    squared = dotproduct(v, v)
    return math.sqrt(squared)

def angle(v1, v2):
    """Return the angle in radians between vectors ``v1`` and ``v2``.

    Fix: the cosine is clamped to [-1.0, 1.0] so floating-point rounding
    on (anti)parallel vectors cannot push the ratio fractionally outside
    acos's domain and raise ValueError.
    """
    cos_theta = dotproduct(v1, v2) / (length(v1) * length(v2))
    return math.acos(max(-1.0, min(1.0, cos_theta)))
    
def normalize(v):
    """Return ``v`` scaled to unit length.

    Raises ZeroDivisionError for a zero-length vector (vmag == 0).
    """
    vmag = length(v)
    return [component / vmag for component in v]
    
    
def sigmoidal_weight_filter(weight, maxweight):
    """Squash ``weight`` through a logistic curve that saturates at ``maxweight``.

    The factor ``maxweight * e`` sets both the slope and the offset, so the
    transition is centred near weight == 1 and steepens with the ceiling.
    """
    k = maxweight * math.e
    return maxweight / (1 + math.exp(k - k * weight))

def scale_timecourse(timecourse):
    """Scale a timecourse so its peak value becomes 1.0.

    Returns the input unchanged when the peak is 0 to avoid division by
    zero. Fix: the peak is computed once up front; the original
    re-evaluated ``max(timecourse)`` inside the comprehension for every
    element, making the function O(n^2).
    """
    peak = float(max(timecourse))
    if peak == 0.0:
        return timecourse
    return [val / peak for val in timecourse]

def list_to_matrix(list, rows, cols):
    """Reshape a flat sequence into a ``rows`` x ``cols`` nested list.

    Raises StopIteration when the sequence holds fewer than rows*cols
    items; any extra items are ignored.
    """
    it = iter(list)
    return [[next(it) for _ in range(cols)] for _ in range(rows)]

def print_list_as_matrix(list, rows, cols):
    """Print a flat sequence as a ``rows`` x ``cols`` matrix, one row per line.

    Fixes: the Python 2 ``print row`` statement (a SyntaxError under
    Python 3) is replaced with the ``print()`` function, and the unused
    ``matrix`` accumulator is removed.
    """
    gen = iter(list)
    for _ in range(rows):
        print([next(gen) for _ in range(cols)])

def log_list_as_matrix(list, rows, cols):
    """Log a flat sequence as a ``rows`` x ``cols`` matrix via logging.debug,
    one row per call.

    Fix: drops the unused ``matrix`` accumulator and the dead trailing
    ``return`` from the original.
    """
    gen = iter(list)
    for _ in range(rows):
        logging.debug([next(gen) for _ in range(cols)])

def list_duplicates_of(seq, item):
    """Return every index at which ``item`` occurs in ``seq``."""
    locs = []
    pos = -1
    while True:
        try:
            pos = seq.index(item, pos + 1)
        except ValueError:
            return locs
        locs.append(pos)

def list_accumulate(l1, l2):
    """Add each element of ``l1`` into ``l2`` element-wise, returning a copy of ``l2``.

    NOTE: ``l2`` is mutated in place by design — it serves as the running
    total when this function is used with reduce().
    """
    for idx, value in enumerate(l1):
        l2[idx] = l2[idx] + value
    return list(l2)

def list_subtract(l1, l2):
    """Subtract each element of ``l2`` from ``l1`` element-wise, returning a copy of ``l1``.

    NOTE: ``l1`` is mutated in place, mirroring list_accumulate's contract.
    """
    for idx, value in enumerate(l2):
        l1[idx] = l1[idx] - value
    return list(l1)

def list_duplicates(seq):
    """Find the duplicated items of a sequence.

    @return generator object yielding duplicates as (key, indices), where
            indices lists every position at which the key occurs.

    Fix: the original used a broad ``except Exception`` as a first-insert
    path, which would also have masked unrelated errors; ``dict.setdefault``
    expresses the same grouping without any exception handling.
    """
    tally = {}
    for i, item in enumerate(seq):
        tally.setdefault(item, []).append(i)
    return ((key, locs) for key, locs in tally.items() if len(locs) > 1)

def accumulate_duplicates(value_list_tuples):
    """Merge (key, list) tuples that share a key by element-wise summing their lists.

    Unique entries keep their relative order; each merged entry is appended
    at the end of the returned list as (key, accumulated_list).

    Fixes: the bare ``reduce`` call was a NameError under Python 3 (the
    function already uses ``functools.reduce`` a few lines below); and
    ``reduce`` over an empty iterable raised TypeError when the input had
    no duplicates — an initial value of [] makes that case a no-op.
    """
    result_list = value_list_tuples[:]

    duplicates_by_index = list(list_duplicates([v[0] for v in value_list_tuples]))

    # Remove duplicate entries, popping from the highest index down so
    # the lower indices remain valid.
    all_dup_indices = functools.reduce(add, [val[1] for val in duplicates_by_index], [])
    for index in sorted(all_dup_indices, reverse=True):
        result_list.pop(index)

    # Element-wise accumulate each duplicate group's lists, then append the
    # merged entry. NOTE: list_accumulate mutates its second argument, so
    # the original inner lists in value_list_tuples are modified in place.
    for dup in duplicates_by_index:
        to_be_added = [value_list_tuples[idx][1] for idx in dup[1]]
        accumulator = functools.reduce(list_accumulate, to_be_added)
        result_list.append((dup[0], accumulator))

    return result_list

def chunks(l, n):
    """Split ``l`` into consecutive slices of length ``n`` (last may be shorter)."""
    out = []
    for start in range(0, len(l), n):
        out.append(l[start:start + n])
    return out

def bubble_sort(lst):
    """Return an ascending-sorted copy of ``lst`` (exchange/selection sort).

    The input list is left unmodified. Fixes: the inner-loop local variable
    ``next`` shadowed the builtin of the same name, and the dead
    ``x, y = 0, 0`` initialisers are removed; the swap now uses tuple
    assignment instead of two temporaries.
    """
    result = lst[:]
    size = len(result)
    for i in range(size):
        for j in range(i, size):
            if result[i] > result[j]:
                result[i], result[j] = result[j], result[i]
    return result

def fill_arr(size):
    """Return a list of ``size`` random integers in [0, 100], seeded from the clock."""
    rng = random.Random(time.time())
    return [rng.randint(0, 100) for _ in range(size)]

def logistic_sigmoid(x):
    """Standard logistic function: 1 / (1 + e^-x)."""
    return 1 / (1 + math.pow(math.e, -x))

def sigmoid(args):
    """Evaluate the parameterised sigmoid 1 / (1 + (intercept/x)**slope).

    args: either a bare number x (defaults intercept=0.5, slope=5), or a
    3-tuple (x, intercept, slope). Any other input yields None.

    Fixes: ``isinstance`` replaces the exact ``type(...) is ...`` checks
    (so numeric subclasses are accepted), and an argument without a length
    (e.g. None) now returns the documented None instead of raising
    TypeError on ``len()``.
    """
    if isinstance(args, (int, float)):
        x = args
        return 1 / (1 + math.pow(0.5 / x, 5))
    try:
        x, intercept, slope = args  # tuple unpacking; fails unless len == 3
    except (TypeError, ValueError):
        return None
    return 1 / (1 + math.pow(intercept / x, slope))

def floor_to_zero(val):
    """Clamp negative values to 0; pass non-negative values through unchanged."""
    if val < 0:
        return 0
    return val

def gaussian(args):
    """Draw one sample from a Gaussian distribution.

    args: None for the standard normal N(0, 1); a 2-tuple (mu, sigma) for
    N(mu, sigma); anything else returns None.

    Fix: ``args == None`` replaced with ``args is None`` (identity check,
    per PEP 8 — an object whose __eq__ claimed equality with None would
    have hijacked the branch).
    """
    r = random.Random(time.time())
    if args is None:
        return r.gauss(0.0, 1.0)
    if len(args) == 2:
        mu, sigma = args
        return r.gauss(mu, sigma)
    return None

"""
Calculate standard deviation of data x[]:
    std = sqrt(\sum_i (x_i - mean)^2 \over n-1)
"""
def stdv(x):
    """Return the sample standard deviation of ``x`` (Bessel-corrected, n-1).

    Bug fix: the original applied ``sqrt(acc / (n-1))`` INSIDE the
    accumulation loop, repeatedly square-rooting the running total and
    returning a value unrelated to the standard deviation. The square root
    is now taken once, after the full sum of squared deviations.

    Raises ZeroDivisionError for a single-element input (n - 1 == 0),
    matching the original's behaviour.
    """
    n = len(x)
    mean = sum(x) / len(x)  # same computation as calc_mean(x)
    total = 0.0
    for a in x:
        total += (a - mean) ** 2
    return math.sqrt(total / float(n - 1))

def calc_mean(x):
    """Arithmetic mean of a non-empty sequence (ZeroDivisionError when empty)."""
    total = sum(x)
    return total / len(x)
    
def t_test(pop1, pop2):
    """Unequal-variance (Welch-style) t statistic for two independent samples.

    Computes (mean1 - mean2) / sqrt(var1/n1 + var2/n2) using the sample
    statistics from calc_mean() and stdv(). Only the statistic is
    returned; see ttest_ind for a p-value.
    """
    n1, n2 = len(pop1), len(pop2)
    var1 = math.pow(stdv(pop1), 2)
    var2 = math.pow(stdv(pop2), 2)
    mean_diff = calc_mean(pop1) - calc_mean(pop2)
    return mean_diff / math.sqrt((var1 / n1) + (var2 / n2))


def _ttest_finish(df,t):
    """Common code between all 3 t-test functions."""
    prob = scipy.stats.distributions.t.sf(np.abs(t), df) * 2 #use np.abs to get upper tail
    if t.ndim == 0:
        t = t[()]

    return t, prob
    
def ttest_ind(a, b, axis=0, equal_var=True):
    """
    Calculates the T-test for the means of TWO INDEPENDENT samples of scores.

    This is a two-sided test for the null hypothesis that 2 independent samples
    have identical average (expected) values. This test assumes that the
    populations have identical variances.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        Axis can equal None (ravel array first), or an integer (the axis
        over which to operate on a and b).
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

    Returns
    -------
    t : float or array
        The calculated t-statistic.
    prob : float or array
        The two-tailed p-value.

    Notes
    -----
    We can use this test, if we observe two independent samples from
    the same or different population, e.g. exam scores of boys and
    girls or of two ethnic groups. The test measures whether the
    average (expected) value differs significantly across samples. If
    we observe a large p-value, for example larger than 0.05 or 0.1,
    then we cannot reject the null hypothesis of identical average scores.
    If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
    then we reject the null hypothesis of equal averages.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test

    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)

    Test with sample with identical means:

    >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> stats.ttest_ind(rvs1,rvs2)
    (0.26833823296239279, 0.78849443369564776)
    >>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
    (0.26833823296239279, 0.78849452749500748)

    `ttest_ind` underestimates p for unequal variances:

    >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
    >>> stats.ttest_ind(rvs1, rvs3)
    (-0.46580283298287162, 0.64145827413436174)
    >>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
    (-0.46580283298287162, 0.64149646246569292)

    When n1 != n2, the equal variance t-statistic is no longer equal to the
    unequal variance t-statistic:

    >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
    >>> stats.ttest_ind(rvs1, rvs4)
    (-0.99882539442782481, 0.3182832709103896)
    >>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
    (-0.69712570584654099, 0.48716927725402048)

    T-test with different means, variance, and n:

    >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
    >>> stats.ttest_ind(rvs1, rvs5)
    (-1.4679669854490653, 0.14263895620529152)
    >>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
    (-0.94365973617132992, 0.34744170334794122)

    """
    a = np.asarray(a)
    b = np.asarray(b)

    # Bug fix: the docstring allows axis=None ("ravel array first"), but
    # the original indexed a.shape[axis], which raises TypeError for None.
    # Ravel explicitly and operate along the single remaining axis.
    if axis is None:
        a = a.ravel()
        b = b.ravel()
        axis = 0

    v1 = np.var(a, axis, ddof=1)  # unbiased sample variances
    v2 = np.var(b, axis, ddof=1)
    n1 = a.shape[axis]
    n2 = b.shape[axis]

    if equal_var:
        # Standard Student's t-test with a pooled variance estimate.
        df = n1 + n2 - 2
        svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
        denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
    else:
        # Welch's t-test: degrees of freedom via the Welch-Satterthwaite
        # approximation.
        vn1 = v1 / n1
        vn2 = v2 / n2
        df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))

        # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
        # Hence it doesn't matter what df is as long as it's not NaN.
        df = np.where(np.isnan(df), 1, df)
        denom = np.sqrt(vn1 + vn2)

    d = np.mean(a, axis) - np.mean(b, axis)
    t = np.divide(d, denom)
    t, prob = _ttest_finish(df, t)

    return t, prob