import numpy as np
import heapq


# kd-tree每个结点中主要包含的数据结构如下
class KdNode:
    """A single kd-tree node: one sample point plus its splitting metadata."""

    # One node is allocated per training sample; __slots__ drops the
    # per-instance __dict__ and keeps large trees compact.
    __slots__ = ('data', 'label', 'axis', 'left', 'right')

    def __init__(self, data, label, axis, left, right):
        self.data = data    # k-dimensional sample point (a vector in k-space)
        self.label = label  # class label of the sample
        self.axis = axis    # index of the dimension this node splits on
        self.left = left    # kd-subtree of the half-space left of the plane
        self.right = right  # kd-subtree of the half-space right of the plane

    def __repr__(self):
        # Children are omitted to keep the repr short on deep trees.
        return 'KdNode(data={!r}, label={!r}, axis={!r})'.format(
            self.data, self.label, self.axis)


class KDTree(object):
    """kd-tree over a labelled sample set, supporting k-NN classification.

    The tree is built by recursively splitting at the median sample along
    successive (cycled) dimensions; ``predict`` runs a backtracking nearest-
    neighbour search and classifies by majority vote among the k neighbours.
    """

    def __init__(self, data_set, labels):
        """Build the tree from samples of shape (n, d) and their n labels."""
        data_set = np.asarray(data_set).astype(np.float32)
        labels = np.asarray(labels)
        assert len(data_set.shape) == 2, '数据维数必须为2'
        assert len(labels.shape) == 1, '标记的维数必须是1'
        assert data_set.shape[0] == labels.shape[0], '标记长度与样本数量不一致'

        def create_node(split_axis, data, _labels):
            # An empty partition produces no subtree.
            if data is None or data.shape[0] == 0:
                return None
            # Sort the partition along the splitting axis and take the median
            # sample as this node; the halves recurse on the next axis.
            order = np.argsort(data[:, split_axis])
            data = data[order]
            _labels = _labels[order]
            mid = data.shape[0] // 2
            next_axis = (split_axis + 1) % data.shape[1]
            return KdNode(data[mid], _labels[mid], split_axis,
                          create_node(next_axis, data[:mid], _labels[:mid]),
                          create_node(next_axis, data[mid + 1:], _labels[mid + 1:]))

        self.root = create_node(0, data_set, labels)

    # Pre-order traversal (debug helper): prints each node's point and label.
    def _preOrder(self):
        def preorder(node):
            # The None guard makes the traversal safe on an empty tree too
            # (the original dereferenced node.data unconditionally).
            if node is None:
                return
            print(node.data, node.label)
            preorder(node.left)
            preorder(node.right)

        preorder(self.root)

    def __search_k_node(self, node, aim_data, result_heap, dist_generator):
        """Recursively collect the k nearest neighbours of ``aim_data``.

        ``result_heap`` is a min-heap of ``(-distance, tie_breaker, node)``
        entries, so its root always holds the WORST of the k best distances
        found so far (negated).
        """
        if node is None:
            return

        cur_dist = dist_generator(aim_data, node.data)
        if cur_dist < -result_heap[0][0]:  # closer than the current k-th best
            # BUGFIX: id(node) acts as a tie-breaker.  With bare
            # (-dist, node) tuples, two equal distances made heapq compare
            # KdNode objects, which raises TypeError.
            heapq.heapreplace(result_heap, (-cur_dist, id(node), node))

        split_plane = node.data[node.axis]
        plane_dist = abs(aim_data[node.axis] - split_plane)

        # Descend first into the half-space that contains the query point.
        if aim_data[node.axis] < split_plane:
            self.__search_k_node(node.left, aim_data, result_heap, dist_generator)
        else:
            self.__search_k_node(node.right, aim_data, result_heap, dist_generator)

        # Backtrack into the other half-space only if the hypersphere of the
        # current k-th best distance crosses the splitting hyperplane.
        if plane_dist < -result_heap[0][0]:
            if aim_data[node.axis] < split_plane:
                self.__search_k_node(node.right, aim_data, result_heap, dist_generator)
            else:
                self.__search_k_node(node.left, aim_data, result_heap, dist_generator)

    def predict(self, input_data, k=1):
        """Return the majority label among the k nearest training samples."""
        aim_data = np.asarray(input_data, dtype=np.float32)
        assert aim_data.shape == self.root.data.shape, '输入数据维度有误'
        # Seed with k sentinels at distance +inf (stored negated); the integer
        # tie-breaker keeps heap entries totally ordered (see __search_k_node).
        result_heap = [(float('-inf'), i, None) for i in range(k)]

        def dist_generator(vec1, vec2):
            # Euclidean distance.
            return np.sqrt(np.sum(np.square(vec1 - vec2)))

        # BUGFIX: search with the normalised ``aim_data`` (the original passed
        # the raw ``input_data`` argument through instead).
        self.__search_k_node(self.root, aim_data, result_heap, dist_generator)

        # Plain majority vote over the collected neighbours; sentinel entries
        # (node is None) are skipped.  NOTE: the original comment claimed
        # distance-weighted voting, which was never implemented.
        vote_map = {}
        for _neg_dist, _tie, node in result_heap:
            if node is not None:
                vote_map[node.label] = vote_map.get(node.label, 0) + 1

        return max(vote_map, key=vote_map.get)


if __name__ == '__main__':
    # Small smoke demo: build a tree over six 2-D points and classify a query.
    samples = [[2, 3], [5, 4], [9, 6], [4, 7], [8, 1], [7, 2]]
    targets = [23, 54, 96, 47, 81, 72]
    tree = KDTree(samples, targets)
    print(tree.predict([2.4, 3], 1))

