import matplotlib.pyplot as plt
import numpy as np
from scipy.cluster.vq import kmeans2
from scipy.spatial.distance import pdist, squareform
''' Probability Density '''
def drawHist(self, name):
    """Plot a 60-bin filled-step histogram of the data.

    Args:
        self: 1-D array-like of numeric values.
        name: label used for the x-axis and the plot title.
    """
    plt.hist(self, 60, histtype='stepfilled')
    plt.xlabel('%s' % name)
    plt.ylabel('Frequency')
    plt.title('The %s distribution' % name)
    plt.show()

def Histgra_curve(self, n, name):
    """Compute an n-bin density curve of the data, save it as CSV/PDF and plot it.

    The curve's y-values are bin counts divided by the uniform bin width,
    i.e. an unnormalized density estimate evaluated at the bin centers.

    Args:
        self: 1-D array-like of numeric values.
        n:    number of histogram bins.
        name: base name for the output files ('<name><n>.csv' / '.pdf') and title.

    Returns:
        (hist, bins): raw bin counts and the corresponding bin centers.
    """
    a = np.array(self)
    deltax = (np.max(a) - np.min(a)) / n  # uniform bin width
    hist, bin_edges = np.histogram(a, bins=n)
    print(len(bin_edges))
    # Bin centers: midpoint of each pair of consecutive edges.
    bins = (bin_edges[:-1] + bin_edges[1:]) / 2
    # Two rows (centers, density) written as CSV.  The original built a
    # pandas DataFrame, but pandas was never imported (NameError at call
    # time); a plain numpy stack produces the identical file contents.
    d = np.vstack((bins, hist / deltax))
    e = name + '%s' % n
    np.savetxt(e + '.csv', d, fmt='%.8f', delimiter=',')
    plt.figure(figsize=(8, 4))
    plt.plot(bins, hist / deltax)
    plt.title(name + '%s' % n)
    plt.savefig(e + '.pdf')
    plt.show()
    return hist, bins

'''Normalization'''
def MedianNormalzation(x, median, b):
    """Scale *x* relative to the median: (x - median) / (median * b)."""
    return (x - median) / (median * b)

def MaxMinNormalization(x, Max, Min):
    """Linearly rescale *x* so that Min maps to 0 and Max maps to 1."""
    span = Max - Min
    return (x - Min) / span
# z-score standardization
def GaussianNormalization(x, mean, epsilon):
    """Z-score standardization: shift by *mean*, scale by *epsilon* (std. dev.)."""
    return (x - mean) / epsilon

def Binary(self):
    """Threshold the sequence in place: positives become 1, everything else 0.

    Returns the same (mutated) sequence for convenience.
    """
    for idx, value in enumerate(self):
        self[idx] = 1 if value > 0 else 0
    return self

'''Classfication by sort'''
def clf_sort(self, name, n, up_split, down_split):
    """Bin the values of *self* into n ordered classes (1..n).

    The lowest boundary is the *down_split* percentile of the data and the
    highest is the *up_split* percentile; interior boundaries are equally
    spaced between them.  Values at or below the lowest boundary get class
    1, values above the highest get class n, and everything in between is
    labelled by the interval it falls into.

    Returns a float array of class labels, one per input value.
    """
    count = len(self)
    labels = np.zeros((count,))
    bounds = np.zeros((n,))
    bounds[0] = np.percentile(self, down_split)
    bounds[n - 1] = np.percentile(self, up_split)
    step = (bounds[n - 1] - bounds[0]) / (n - 2)
    for k in range(1, n - 1):
        bounds[k] = bounds[0] + step * k
    # NOTE: bounds[n-2] coincides with bounds[n-1] by construction
    # (bounds[0] + step*(n-2) == bounds[n-1]), so the interior intervals
    # below cover the whole (bounds[0], bounds[n-1]] range with no gap.
    for idx in range(count):
        value = self[idx]
        if value <= bounds[0]:
            labels[idx] = 1
        elif value > bounds[n - 1]:
            labels[idx] = n
        else:
            for k in range(1, n - 1):
                if bounds[k - 1] < value <= bounds[k]:
                    labels[idx] = k + 1
    print('The boundary of %s classification is %s' % (name, bounds))
    return labels

def clf_sort_mix(max_hp, med_hp, name):
    """Jointly classify two series into 9 classes on a 3x3 percentile grid.

    Each series is split at its own 40th and 80th percentiles into three
    tiers (low / mid / high).  The pair of tiers maps to a class 1..9:
    *med_hp* selects the row and *max_hp* the column.

    Returns a float array of class labels, one per input pair.
    """
    count = len(max_hp)
    output_lev = np.zeros((count,))
    stage = np.zeros((4,))
    stage[0] = np.percentile(max_hp, 40)
    stage[1] = np.percentile(max_hp, 80)
    stage[2] = np.percentile(med_hp, 40)
    stage[3] = np.percentile(med_hp, 80)

    def tier(value, low, high):
        # 0 = at/below low, 1 = in (low, high], 2 = above high;
        # None when no comparison holds (e.g. NaN input).
        if value <= low:
            return 0
        if low < value <= high:
            return 1
        if value > high:
            return 2
        return None

    for i in range(count):
        col = tier(max_hp[i], stage[0], stage[1])
        row = tier(med_hp[i], stage[2], stage[3])
        if col is None or row is None:
            # Matches the original fall-through branch (label stays 0).
            print ("there are some value didn't scan!!")
        else:
            output_lev[i] = 3 * row + col + 1
    print('The boundary of %s classification is %s' %(name,stage))

    return output_lev

def WorkHome_clf(self, down_slpit, up_slpit):
    """Classify each jobs-housing ratio against two thresholds.

    Labels: 0 when the value is at or below *down_slpit*, 1 when it is
    above *up_slpit*, and 2 when it lies strictly between the two.

    Returns a float array of labels, one per input value.
    """
    size = len(self)
    labels = np.zeros((size,))
    for idx in range(size):
        value = self[idx]
        if value <= down_slpit:
            labels[idx] = 0
        elif value > up_slpit:
            labels[idx] = 1
        elif down_slpit < value <= up_slpit:
            labels[idx] = 2
        else:
            # Unreachable for ordinary numbers; guards against NaN input.
            print ('***************ERROR for WorkHome ratio**************')
    print('The boundary of Jobs-housing ratio classification is [%s,%s]'%(down_slpit,up_slpit) )
    return labels


'''Feactures space'''
def cov_define(dataSet):  # covariance
    """Sample covariance matrix of *dataSet* (rows = dimensions, cols = samples).

    Equivalent to np.cov(dataSet) with the default ddof=1.  The original
    filled the matrix with a Python double loop (O(d^2) interpreted
    iterations); a single matrix product computes the same values in C.

    Args:
        dataSet: 2-D array of shape (num_dim, num_samples).

    Returns:
        (num_dim, num_dim) covariance matrix.
    """
    num_samples = dataSet.shape[1]
    mean_data = np.mean(dataSet, 1)  # per-dimension mean
    centered = (dataSet.transpose() - mean_data).transpose()
    # Unbiased estimator: divide by (num_samples - 1).
    cov_mat = centered @ centered.transpose() / (num_samples - 1)
    return cov_mat
# Take the first n principal components and compute each feature's factor (loading) in every component
def eig_pc(self,n_features):
    """Take the top *n_features* principal components of *self*.

    *self* is transposed before the covariance is computed, so its rows
    are treated as observations and its columns as dimensions.
    Returns an (n_features, num_dims) array where row k holds the k-th
    largest eigenvalue multiplied elementwise by row (num_dims-1-k) of the
    eigenvector matrix.
    """
    X_trans=np.transpose(self)
    Cov_X=cov_define(X_trans)
    # eigh returns eigenvalues in ascending order; the eigenvectors are
    # stored as the COLUMNS of eigVector.
    (eigV, eigVector) = np.linalg.eigh(Cov_X)
    print ('The influnce factors of  are %s'%eigV)
    eig_pc=np.zeros((n_features,len(eigV)))
    # Walk the eigenvalues from the largest (last index) downward,
    # taking n_features of them.
    for i in range(len(eigV)-1,len(eigV)-1-n_features,-1):
        for j in range (0,len(eigV),1):
            # NOTE(review): eigVector[i,j] indexes the i-th ROW, but eigh
            # stores eigenvectors as columns (eigVector[:, i]); this looks
            # like it scales matrix rows rather than eigenvectors -- confirm
            # intent before relying on these loadings.
            eig_pc[len(eigV)-1-i,j]=eigV[i]*eigVector[i,j]
    return eig_pc
# In the principal components computed above, keep only the n features with the largest absolute values
def Abs_eig_func(self, n_feactures):
    """Keep, per row, only the *n_feactures* entries with the largest magnitude.

    For each row, the (100 * n_feactures / num_cols)-th top percentile of
    absolute values is used as a threshold: entries whose magnitude falls
    below it are zeroed; the rest keep their ORIGINAL (signed) value.

    Fixes two defects in the original implementation:
      * ``eig_pc2 = self`` aliased the input, so taking abs() in place
        destroyed the signed values before the "restore original" branch
        ran -- surviving entries came back as absolute values;
      * the caller's array was mutated as a side effect.

    Args:
        self:        2-D numpy array (e.g. principal-component loadings).
        n_feactures: number of entries to keep per row.

    Returns:
        New array of the same shape with small-magnitude entries zeroed.
    """
    rows, cols = np.shape(self)
    pencile = n_feactures / cols * 100
    magnitudes = np.abs(self)  # operate on a copy; do not touch *self*
    result = np.zeros_like(self, dtype=float)
    for i in range(rows):
        threshold = np.percentile(magnitudes[i, :], 100 - pencile)
        keep = magnitudes[i, :] >= threshold
        result[i, keep] = self[i, keep]
    return result
# k-means silhouette coefficient
def silhouette(data, cluster_ids):
    """
    Computes the silhouette score for each instance of a clustered dataset,
    which is defined as:
        s(i) = (b(i)-a(i)) / max{a(i),b(i)}
    with:
        -1 <= s(i) <= 1

    Args:
        data        : A M-by-N array of M observations in N dimensions
        cluster_ids : array of len M containing cluster indices (starting from zero)

    Returns:
        s : silhouette value of each observation
    """
    N = data.shape[0]                  # number of instances
    K = len(np.unique(cluster_ids))    # number of clusters

    # Pairwise distance matrix.  pdist/squareform come from
    # scipy.spatial.distance -- they were never imported in the original,
    # so this function raised NameError when called.
    D = squareform(pdist(data))

    # indices belonging to each cluster
    kIndices = [np.flatnonzero(cluster_ids == k) for k in range(K)]

    # compute a, b, s for each instance
    a = np.zeros(N)
    b = np.zeros(N)
    for i in range(N):
        # a(i): mean distance to the other members of i's own cluster.
        # NOTE(review): a singleton cluster makes this the mean of an empty
        # list (NaN) -- callers should guarantee clusters of size >= 2.
        a[i] = np.mean([D[i][ind] for ind in kIndices[cluster_ids[i]] if ind != i])
        # b(i): smallest mean distance from i to any OTHER cluster.
        b[i] = np.min([np.mean(D[i][ind])
                       for k, ind in enumerate(kIndices) if cluster_ids[i] != k])
    s = (b - a) / np.maximum(a, b)

    return s

def silhouetteForme(k, datadef):
    """Run k-means with *k* clusters on *datadef* and print the mean silhouette.

    Uses scipy.cluster.vq.kmeans2 for the clustering and silhouette() for
    the per-instance scores.  Note: kmeans2 is randomly initialised, so the
    printed score can vary between calls.
    """
    cluster_centroids, cluster_ids = kmeans2(datadef, k)
    s = silhouette(datadef, cluster_ids)
    # np.mean instead of sp.mean: scipy.mean was deprecated and removed
    # (and `sp` was never imported here anyway); the value is identical.
    print("For " + str(k) + " clusters, the average silhouette score is " + str(np.mean(s)))
