

import operator as op
from collections import Counter

import numpy as np
from numpy import *

def normalize(props):
    '''Scale each column of ``props`` linearly into the [0, 1] interval.

    Returns
    -------
    norm_props:
        the column-wise rescaled array
    ranges:
        per-column value range (max - min); zero ranges are replaced by 1
    min_values:
        per-column minimum
    '''
    min_values = props.min(0)
    max_values = props.max(0)
    ranges = max_values - min_values
    # A constant column has zero range; use 1 to avoid division by zero
    # (the rescaled column then becomes all zeros).
    ranges[ranges == 0] = 1
    norm_props = (props - min_values) / ranges
    return norm_props, ranges, min_values

def frequency(labels):
    '''Count how many times each value occurs in ``labels``.

    Parameters
    ----------
    labels: iterable of hashable values
        the labels to tally

    Returns
    -------
    freq: {label: int}
        mapping from each distinct label to its number of occurrences
    '''
    # Counter is the stdlib idiom for tallying; convert back to a plain
    # dict so the return type matches the original implementation.
    return dict(Counter(labels))

def sort_dict(d, sort_key=True, ascend=True):
    '''Return the items of ``d`` in sorted order.

    Parameters
    ----------
    d: dict
        the dictionary to sort
    sort_key: bool
        sort by key when True, otherwise by value
    ascend: bool
        sort ascending when True, descending otherwise

    Returns
    -------
    sorted_d: list of (key, value) tuples
        the items in the requested order
    '''
    position = 0 if sort_key else 1
    return sorted(d.items(), key=op.itemgetter(position), reverse=not ascend)



def newton_down_hill(x, f, fd, fdata=None, alpha=1, eps=0.0001):
    '''Solve f(x) = 0 with a damped (down-hill) Newton iteration.

    Each step tries ``x1 = x - alpha * fd(x).I.T * y``; the step is
    accepted only when it reduces ||f(x)||, otherwise the damping
    factor ``alpha`` is halved.

    Parameters
    ----------
    x:
        initial guess; assumes a ``np.matrix`` so that ``fd(x).I`` works
        -- TODO confirm callers pass matrices
    f, fd:
        the function and its Jacobian (``fd(x)`` must expose ``.I``)
    fdata:
        unused; kept for backward compatibility with existing callers
    alpha:
        initial damping factor
    eps:
        convergence tolerance on ||f(x)|| (also the lower bound on alpha)

    Returns
    -------
    (x, y):
        the approximate root and the residual f(x) at it
    '''
    y = f(x)
    # Use the explicit linalg name: `norm` is not exported by
    # `from numpy import *`, so the bare call would raise NameError.
    normy = np.linalg.norm(y)
    # Reuse the tracked residual norm instead of recomputing norm(y)
    # on every loop test.
    while normy > eps and alpha > eps:
        x1 = x - alpha * fd(x).I.T * y
        y1 = f(x1)
        normy1 = np.linalg.norm(y1)
        if normy1 < normy:
            # The damped step reduced the residual: accept it.
            (x, y, normy) = (x1, y1, normy1)
        else:
            # Otherwise shrink the step and retry from the same point.
            alpha = alpha / 2
    return (x, y)

def gradient_descent(x, f, fd, step=1, eps=0.001):
    '''Minimize f with gradient descent and step-halving.

    A step is accepted only when it decreases f; otherwise the step
    size is halved. Iteration stops when the improvement per accepted
    step, or the step size itself, drops to ``eps``.

    Parameters
    ----------
    x:
        initial point
    f, fd:
        the objective function and its derivative/gradient
    step:
        initial step size
    eps:
        convergence tolerance (on both improvement and step size)

    Returns
    -------
    x:
        the located minimizer
    y:
        f(x) at that point
    '''
    # np.inf instead of the bare `inf` from the star import, so the
    # function does not depend on `from numpy import *`.
    (x_curr, y_curr, diff) = (x, f(x), np.inf)

    while diff > eps and step > eps:
        x_next = x_curr - step * fd(x_curr)
        y_next = f(x_next)
        if y_next < y_curr:
            # Accept the step; track the improvement for the stop test.
            (x_curr, y_curr, diff) = (x_next, y_next, y_curr - y_next)
        else:
            # Overshot: halve the step and retry from the same point.
            step = step * 0.5

    return (x_curr, y_curr)

def euclidean(x, ys):
    '''Euclidean distance from point ``x`` to every row of ``ys``.

    distance_i = sqrt(sum_j (x_j - ys_ij)^2)
    '''
    deltas = x - ys
    return np.sqrt((deltas ** 2).sum(axis=1))

class KDTree:
    '''A simple k-d tree over the rows of a 2-D feature array.

    Internal nodes are 4-element lists ``[split_col, split_value, left,
    right]``; leaves are plain lists of row indices into ``self.props``.
    '''

    def __init__(self, leafsize):
        # Maximum number of points stored in a single leaf.
        self.leafsize = leafsize

    def create(self, props):
        '''Build the tree over the rows of ``props`` (a 2-D numpy array).'''
        self.props = props.copy()
        self.tree = self._create(list(range(len(props))))

    def _create(self, rows):
        # Recursively split ``rows`` at the median of the column with
        # the largest variance; stop once a leaf is small enough.
        if len(rows) <= self.leafsize:
            return rows

        # Choose the split column: the one with the largest variance.
        variances = np.var(self.props[rows, :], 0)
        col = variances.argsort()[-1]

        order = self.props[rows, col].argsort()
        # Integer division: the original `len(p) / 2` produced a float
        # under Python 3 and made `p[:s]` / `p[s]` raise TypeError.
        mid = len(order) // 2

        split_value = self.props[rows[order[mid]], :][col]
        left = self._create([rows[i] for i in order[:mid]])
        right = self._create([rows[i] for i in order[mid:]])
        return [col, split_value, left, right]

    def _is_leaf(self, tree):
        # Internal nodes are [col, value, left, right] with list children;
        # a leaf that happens to hold exactly 4 row indices has an int
        # (not a list) at position 2, so this test stays unambiguous.
        return len(tree) != 4 or type(tree[2]) != list

    def find_nearest_neighbors(self, x, k):
        '''Return the k nearest rows to ``x`` as (distance, row) pairs,
        sorted by ascending distance.  Unfilled slots (fewer than k
        points in the tree) remain ``[inf, -1]``.
        '''
        # nearests = [index of the current worst entry, k best (dist, row)]
        nearests = [0, [[np.inf, -1] for _ in range(k)]]

        self._find_nearest_neighbor(x, self.tree, nearests)

        # Dead unconditional `return nearests[1]` after this return removed.
        return sorted(nearests[1], key=op.itemgetter(0))

    def _find_nearest_neighbor(self, x, tree, nearests):
        trace = []

        # Descend to the leaf that contains x, recording the path.
        subtree = tree
        while not self._is_leaf(subtree):
            trace.append(subtree)
            subtree = subtree[2] if x[subtree[0]] < subtree[1] else subtree[3]

        # Distances from x to every point stored in the leaf.
        distances = np.sqrt(np.sum((x - self.props[subtree, :]) ** 2, axis=1))

        for i in range(len(subtree)):
            if distances[i] < nearests[1][nearests[0]][0]:
                nearests[1][nearests[0]] = (distances[i], subtree[i])

                # Recompute which of the k entries is now the worst.
                nearests[0] = 0
                for j in range(1, len(nearests[1])):
                    if nearests[1][nearests[0]][0] < nearests[1][j][0]:
                        nearests[0] = j

        # Backtrack: search the far side of a split whenever the splitting
        # hyperplane is closer to x than the current worst candidate.
        while trace:
            node = trace.pop()
            plane_dist = x[node[0]] - node[1]
            if plane_dist <= 0 and -plane_dist < nearests[1][nearests[0]][0]:
                self._find_nearest_neighbor(x, node[3], nearests)
            elif plane_dist > 0 and plane_dist < nearests[1][nearests[0]][0]:
                self._find_nearest_neighbor(x, node[2], nearests)





def replace_nan_with_mean(xs):
    '''In place, replace every NaN in ``xs`` (a 2-D float array) with
    the mean of its column, computed over the column's non-NaN entries.
    '''
    for col in range(xs.shape[1]):
        column = xs[:, col]          # a view: assignments modify xs
        nan_mask = np.isnan(column)
        column[nan_mask] = np.mean(column[~nan_mask])

