{
 "metadata": {
  "signature": "sha256:d802157eab6f42af73afc5ed8a4c0b4bde8d2b803a8c1febb31d46a67d68986c"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "KDTree example\n",
      "======================================================================\n",
      "\n",
       "**Note: there is an implementation of a kdtree in scipy:\n",
       "<http://docs.scipy.org/scipy/docs/scipy.spatial.kdtree.KDTree/> It is\n",
       "recommended to use that instead of the below.**\n",
      "\n",
      "This is an example of how to construct and search a\n",
      "[kd-tree](http://en.wikipedia.org/wiki/Kd-tree) in\n",
       "[Python](http://www.python.org) with NumPy. kd-trees are e.g. used to\n",
      "search for neighbouring data points in multidimensional space. Searching\n",
      "the kd-tree for the nearest neighbour of all n points has O(n log n)\n",
      "complexity with respect to sample size.\n",
      "\n",
      "### Building a kd-tree"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#!python numbers=disable\n",
      "\n",
      "# Copyleft 2008 Sturla Molden\n",
      "# University of Oslo\n",
      "\n",
      "#import psyco\n",
      "#psyco.full()\n",
      "\n",
      "import numpy\n",
      "\n",
       "def kdtree( data, leafsize=10 ):\n",
       "    \"\"\"\n",
       "    build a kd-tree for O(n log n) nearest neighbour search\n",
       "\n",
       "    input:\n",
       "        data:       2D ndarray, shape =(ndim,ndata), preferentially C order\n",
       "        leafsize:   max. number of data points to leave in a leaf\n",
       "\n",
       "    output:\n",
       "        kd-tree:    list of tuples\n",
       "\n",
       "    each tuple is a node: (didx, data, left_hrect, right_hrect, left, right)\n",
       "    where didx/data hold the points of a leaf (None for inner nodes) and\n",
       "    left/right are list indices of the child nodes (0 for leaves)\n",
       "\n",
       "    note: data is sorted in place, so the caller's column order is\n",
       "    destroyed -- pass a copy if the original ordering is still needed\n",
       "    \"\"\"\n",
       "    \n",
       "    ndim = data.shape[0]\n",
       "    ndata = data.shape[1]\n",
       "\n",
       "    # find bounding hyper-rectangle\n",
       "    # hrect[0,:] is the lower corner, hrect[1,:] the upper corner\n",
       "    hrect = numpy.zeros((2,data.shape[0]))\n",
       "    hrect[0,:] = data.min(axis=1)\n",
       "    hrect[1,:] = data.max(axis=1)\n",
       "\n",
       "    # create root of kd-tree\n",
       "    # mergesort is stable, so equal values keep a deterministic order\n",
       "    idx = numpy.argsort(data[0,:], kind='mergesort')\n",
       "    data[:,:] = data[:,idx]\n",
       "    # note: ndata/2 is Python 2 integer division (would need // in Python 3)\n",
       "    splitval = data[0,ndata/2]\n",
       "\n",
       "    left_hrect = hrect.copy()\n",
       "    right_hrect = hrect.copy()\n",
       "    left_hrect[1, 0] = splitval\n",
       "    right_hrect[0, 0] = splitval\n",
       "    \n",
       "    tree = [(None, None, left_hrect, right_hrect, None, None)]\n",
       "    \n",
       "    # stack entries: (points, original indices, depth, parent index, is-left-child)\n",
       "    stack = [(data[:,:ndata/2], idx[:ndata/2], 1, 0, True),\n",
       "             (data[:,ndata/2:], idx[ndata/2:], 1, 0, False)]\n",
       "\n",
       "    # recursively split data in halves using hyper-rectangles:\n",
       "    while stack:\n",
       "        \n",
       "        # pop data off stack\n",
       "        data, didx, depth, parent, leftbranch = stack.pop()\n",
       "        ndata = data.shape[1]\n",
       "        nodeptr = len(tree)\n",
       "\n",
       "        # update parent node\n",
       "\n",
       "        _didx, _data, _left_hrect, _right_hrect, left, right = tree[parent]\n",
       "        \n",
       "        # link the node about to be appended as the parent's left or right\n",
       "        # child (tuples are immutable, so rebuild the parent entry)\n",
       "        tree[parent] = (_didx, _data, _left_hrect, _right_hrect, nodeptr, right) if leftbranch \\\n",
       "            else (_didx, _data, _left_hrect, _right_hrect, left, nodeptr)\n",
       "\n",
       "        # insert node in kd-tree\n",
       "\n",
       "        # leaf node?\n",
       "        if ndata <= leafsize:\n",
       "            _didx = didx.copy()\n",
       "            _data = data.copy()\n",
       "            # leaves store the actual points; child pointers are unused (0)\n",
       "            leaf = (_didx, _data, None, None, 0, 0)\n",
       "            tree.append(leaf)\n",
       "\n",
       "        # not a leaf, split the data in two      \n",
       "        else:                  \n",
       "            # cycle through the dimensions as we descend\n",
       "            splitdim = depth % ndim\n",
       "            idx = numpy.argsort(data[splitdim,:], kind='mergesort')\n",
       "            data[:,:] = data[:,idx]\n",
       "            didx = didx[idx]\n",
       "            nodeptr = len(tree)\n",
       "            stack.append((data[:,:ndata/2], didx[:ndata/2], depth+1, nodeptr, True))\n",
       "            stack.append((data[:,ndata/2:], didx[ndata/2:], depth+1, nodeptr, False))\n",
       "            splitval = data[splitdim,ndata/2]\n",
       "            # the children's hyper-rectangles are the parent's rectangle\n",
       "            # cut in two at splitval along splitdim\n",
       "            if leftbranch:\n",
       "                left_hrect = _left_hrect.copy()\n",
       "                right_hrect = _left_hrect.copy()\n",
       "            else:\n",
       "                left_hrect = _right_hrect.copy()\n",
       "                right_hrect = _right_hrect.copy()\n",
       "            left_hrect[1, splitdim] = splitval\n",
       "            right_hrect[0, splitdim] = splitval\n",
       "            # append node to tree\n",
       "            tree.append((None, None, left_hrect, right_hrect, None, None))\n",
       "\n",
       "    return tree"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "### Searching a kd-tree"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#!python numbers=disable\n",
      "    \n",
      "\n",
      "def intersect(hrect, r2, centroid):\n",
      "    \"\"\"\n",
      "    checks if the hyperrectangle hrect intersects with the\n",
      "    hypersphere defined by centroid and r2\n",
      "    \"\"\"\n",
      "    maxval = hrect[1,:]\n",
      "    minval = hrect[0,:]\n",
      "    p = centroid.copy()\n",
      "    idx = p < minval\n",
      "    p[idx] = minval[idx]\n",
      "    idx = p > maxval\n",
      "    p[idx] = maxval[idx]\n",
      "    return ((p-centroid)**2).sum() < r2\n",
      "\n",
      "\n",
      "def quadratic_knn_search(data, lidx, ldata, K):\n",
      "    \"\"\" find K nearest neighbours of data among ldata \"\"\"\n",
      "    ndata = ldata.shape[1]\n",
      "    param = ldata.shape[0]\n",
      "    K = K if K < ndata else ndata\n",
      "    retval = []\n",
      "    sqd = ((ldata - data[:,:ndata])**2).sum(axis=0) # data.reshape((param,1)).repeat(ndata, axis=1);\n",
      "    idx = numpy.argsort(sqd, kind='mergesort')\n",
      "    idx = idx[:K]\n",
      "    return zip(sqd[idx], lidx[idx])\n",
      "\n",
      "\n",
       "def search_kdtree(tree, datapoint, K):\n",
       "    \"\"\" find the k nearest neighbours of datapoint in a kdtree\n",
       "\n",
       "    datapoint is a 2D ndarray with the query point repeated along axis 1\n",
       "    (see knn_search); returns a sorted list of K (squared distance, index)\n",
       "    pairs, padded with (inf, None) if fewer than K points were found \"\"\"\n",
       "    stack = [tree[0]]\n",
       "    # knn is kept sorted; knn[-1][0] is the current K-th best squared\n",
       "    # distance and doubles as the pruning radius for intersect()\n",
       "    knn = [(numpy.inf, None)]*K\n",
       "    _datapt = datapoint[:,0]\n",
       "    while stack:\n",
       "        \n",
       "        leaf_idx, leaf_data, left_hrect, \\\n",
       "                  right_hrect, left, right = stack.pop()\n",
       "\n",
       "        # leaf\n",
       "        if leaf_idx is not None:\n",
       "            _knn = quadratic_knn_search(datapoint, leaf_idx, leaf_data, K)\n",
       "            # merge only if the leaf's best beats our current worst\n",
       "            if _knn[0][0] < knn[-1][0]:\n",
       "                knn = sorted(knn + _knn)[:K]\n",
       "\n",
       "        # not a leaf\n",
       "        else:\n",
       "\n",
       "            # check left branch\n",
       "            if intersect(left_hrect, knn[-1][0], _datapt):\n",
       "                stack.append(tree[left])\n",
       "\n",
       "            # check right branch\n",
       "            if intersect(right_hrect, knn[-1][0], _datapt):\n",
       "                stack.append(tree[right])              \n",
       "    return knn\n",
      "\n",
      "\n",
       "def knn_search( data, K, leafsize=2048 ):\n",
       "\n",
       "    \"\"\" find the K nearest neighbours for data points in data,\n",
       "        using an O(n log n) kd-tree\n",
       "\n",
       "        data is (ndim, ndata); returns, for each point, a list of its K\n",
       "        (squared distance, index) neighbour pairs \"\"\"\n",
       "\n",
       "    ndata = data.shape[1]\n",
       "    param = data.shape[0]\n",
       "    \n",
       "    # build kdtree (on a copy: kdtree() sorts its input in place)\n",
       "    tree = kdtree(data.copy(), leafsize=leafsize)\n",
       "   \n",
       "    # search kdtree\n",
       "    knn = []\n",
       "    for i in numpy.arange(ndata):\n",
       "        # repeat the query point to leafsize columns so a whole leaf can\n",
       "        # be processed in one vectorized subtraction\n",
       "        _data = data[:,i].reshape((param,1)).repeat(leafsize, axis=1);\n",
       "        # ask for K+1 neighbours: the point itself comes back at distance\n",
       "        # zero and is dropped by the [1:] slice\n",
       "        _knn = search_kdtree(tree, _data, K+1)\n",
       "        knn.append(_knn[1:])\n",
       "\n",
       "    return knn\n",
      "\n",
      "\n",
      "def radius_search(tree, datapoint, radius):\n",
      "    \"\"\" find all points within radius of datapoint \"\"\"\n",
      "    stack = [tree[0]]\n",
      "    inside = []\n",
      "    while stack:\n",
      "\n",
      "        leaf_idx, leaf_data, left_hrect, \\\n",
      "                  right_hrect, left, right = stack.pop()\n",
      "\n",
      "        # leaf\n",
      "        if leaf_idx is not None:\n",
      "            param=leaf_data.shape[0]\n",
      "            distance = numpy.sqrt(((leaf_data - datapoint.reshape((param,1)))**2).sum(axis=0))\n",
      "            near = numpy.where(distance<=radius)\n",
      "            if len(near[0]):\n",
      "                idx = leaf_idx[near]\n",
      "                distance = distance[near]\n",
      "                inside += (zip(distance, idx))\n",
      "\n",
      "        else:\n",
      "\n",
      "            if intersect(left_hrect, radius, datapoint):\n",
      "                stack.append(tree[left])\n",
      "\n",
      "            if intersect(right_hrect, radius, datapoint):\n",
      "                stack.append(tree[right])\n",
      "\n",
      "    return inside"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "### Quadratic search for small data sets\n",
      "\n",
       "In contrast to the kd-tree, straightforward exhaustive search has\n",
      "quadratic complexity with respect to sample size. It can be faster than\n",
      "using a kd-tree when the sample size is very small. On my computer that\n",
      "is approximately 500 samples or less."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#!python numbers=disable\n",
      "\n",
      "def knn_search( data, K ):\n",
      "    \"\"\" find the K nearest neighbours for data points in data,\n",
      "        using O(n**2) search \"\"\"\n",
      "    ndata = data.shape[1]\n",
      "    knn = []\n",
      "    idx = numpy.arange(ndata)\n",
      "    for i in numpy.arange(ndata):\n",
      "        _knn = quadratic_knn_search(data[:,i], idx, data, K+1) # see above\n",
      "        knn.append( _knn[1:] )\n",
      "    return knn"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "### Parallel search for large data sets\n",
      "\n",
      "While creating a kd-tree is very fast, searching it can be time\n",
      "consuming. Due to Python's dreaded \"Global Interpreter Lock\" (GIL),\n",
       "threads cannot be used to conduct multiple searches in parallel. That\n",
       "is, Python threads can be used for concurrency but not for parallelism.\n",
      "However, we can use multiple processes (multiple interpreters). The\n",
      "[pyprocessing](http://pyprocessing.berlios.de/) package makes this easy.\n",
      "It has an API similar to Python's threading and Queue standard modules,\n",
      "but work with processes instead of threads. Beginning with Python 2.6,\n",
      "pyprocessing is already included in Python's standard library as the\n",
      "\"multiprocessing\" module. There is a small overhead of using multiple\n",
      "processes, including process creation, process startup, IPC, and process\n",
      "termination. However, because processes run in separate address spaces,\n",
      "no memory contention is incurred. In the following example, the overhead\n",
      "of using multiple processes is very small compared to the computation,\n",
      "giving a speed-up close to the number of CPUs on the computer."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#!python numbers=disable\n",
      "\n",
      "try:\n",
      "    import multiprocessing as processing\n",
      "except:\n",
      "    import processing\n",
      "\n",
      "import ctypes, os\n",
      "\n",
      "def __num_processors():\n",
      "    if os.name == 'nt': # Windows\n",
      "        return int(os.getenv('NUMBER_OF_PROCESSORS'))\n",
      "    else: # glibc (Linux, *BSD, Apple)\n",
      "        get_nprocs = ctypes.cdll.libc.get_nprocs\n",
      "        get_nprocs.restype = ctypes.c_int\n",
      "        get_nprocs.argtypes = []\n",
      "        return get_nprocs()\n",
      "        \n",
      "\n",
       "def __search_kdtree(tree, data, K, leafsize):\n",
       "    \"\"\" worker helper: for every column of data, find its K+1 nearest\n",
       "        neighbours in tree and drop the first (the point itself) \"\"\"\n",
       "    knn = []\n",
       "    param = data.shape[0]\n",
       "    ndata = data.shape[1]\n",
       "    for i in numpy.arange(ndata):\n",
       "        # repeat the query point to leafsize columns for the vectorized\n",
       "        # leaf search (same scheme as the serial knn_search above)\n",
       "        _data = data[:,i].reshape((param,1)).repeat(leafsize, axis=1);\n",
       "        _knn = search_kdtree(tree, _data, K+1)\n",
       "        knn.append(_knn[1:])\n",
       "    return knn\n",
      "\n",
       "def __remote_process(rank, qin, qout, tree, K, leafsize):\n",
       "    \"\"\" worker process body: loop forever taking (chunk id, data) jobs\n",
       "        from qin and putting (chunk id, knn result) on qout; the loop\n",
       "        never exits on its own -- the parent calls terminate() \"\"\"\n",
       "    while 1:\n",
       "        # read input queue (block until data arrives)\n",
       "        nc, data = qin.get()\n",
       "        # process data\n",
       "        knn = __search_kdtree(tree, data, K, leafsize)\n",
       "        # write to output queue\n",
       "        qout.put((nc,knn))\n",
      "\n",
       "def knn_search_parallel(data, K, leafsize=2048):\n",
       "\n",
       "    \"\"\" find the K nearest neighbours for data points in data,\n",
       "        using an O(n log n) kd-tree, exploiting all logical\n",
       "        processors on the computer \"\"\"\n",
       "\n",
       "    ndata = data.shape[1]\n",
       "    param = data.shape[0]\n",
       "    nproc = __num_processors()\n",
       "    # build kdtree (on a copy: kdtree() sorts its input in place)\n",
       "    tree = kdtree(data.copy(), leafsize=leafsize)\n",
       "    # compute chunk size (Python 2 integer division); at least 100 points\n",
       "    # per chunk so the IPC overhead stays small relative to the work\n",
       "    chunk_size = data.shape[1] / (4*nproc)\n",
       "    chunk_size = 100 if chunk_size < 100 else chunk_size\n",
       "    # set up a pool of processes\n",
       "    qin = processing.Queue(maxsize=ndata/chunk_size)\n",
       "    qout = processing.Queue(maxsize=ndata/chunk_size)        \n",
       "    pool = [processing.Process(target=__remote_process,\n",
       "                args=(rank, qin, qout, tree, K, leafsize))\n",
       "                    for rank in range(nproc)]\n",
       "    for p in pool: p.start()\n",
       "    # put data chunks in input queue, tagged with a running chunk id nc\n",
       "    cur, nc = 0, 0\n",
       "    while 1:\n",
       "        _data = data[:,cur:cur+chunk_size]\n",
       "        if _data.shape[1] == 0: break\n",
       "        qin.put((nc,_data))\n",
       "        cur += chunk_size\n",
       "        nc += 1\n",
       "    # read output queue\n",
       "    knn = []\n",
       "    while len(knn) < nc:\n",
       "        knn += [qout.get()]\n",
       "    # chunks may come back in any order -- sort on the chunk id and then\n",
       "    # discard it, so results line up with the input columns again\n",
       "    _knn = [n for i,n in sorted(knn)]\n",
       "    knn = []\n",
       "    for tmp in _knn:\n",
       "        knn += tmp\n",
       "    # terminate workers\n",
       "    for p in pool: p.terminate()\n",
       "    return knn"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "### Running the code\n",
      "\n",
      "The following shows how to run the example code (including how input\n",
      "data should be formatted):"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#!python numbers=disable\n",
      "\n",
      "from time import clock\n",
      "\n",
      "def test():\n",
      "    K = 11\n",
      "    ndata = 10000\n",
      "    ndim = 12\n",
      "    data =  10 * numpy.random.rand(ndata*ndim).reshape((ndim,ndata) )\n",
      "    knn_search(data, K)\n",
      "\n",
      "if __name__ == '__main__':\n",
      "    t0 = clock()\n",
      "    test()\n",
      "    t1 = clock()\n",
      "    print \"Elapsed time %.2f seconds\" % t1-t0\n",
      " \n",
      "    #import profile          # using Python's profiler is not useful if you are\n",
      "    #profile.run('test()')   # running the parallel search."
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [],
     "language": "python",
     "metadata": {},
     "outputs": []
    }
   ],
   "metadata": {}
  }
 ]
}