#!/usr/bin/python
#coding:utf-8

# Copyright 2012 Nicolau Leal Werneck, Anna Helena Reali Costa and
# Universidade de São Paulo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

###############################################################################
### This module includes classes to allow the application of the camera
### orientation estimation technique described in the article:
###
### Speeding up probabilistic inference of camera orientation by function
### approximation and grid masking (2011) --- Nicolau Leal Werneck e Anna Helena
### Reali Costa. 19th International Conference on Computer Graphics,
### Visualization and Computer Vision - WSCG'2011. WSCG2011 Communication
### proceedings.

import sys
import numpy as np
import scipy
import scipy.ndimage
import scipy.stats
import scipy.spatial

from scipy import cos, sin, pi
from scipy.optimize import fmin, fmin_powell

import Image

from itertools import product

## This is not needed if you are using the numpy implementation of the likelihood expression
import camori_aux

import filterSQP

from quaternion import Quat
# from camori_aux import quatSQP_value, quatSQP_gradient, quatSQP_hessian,\
#      quatSQP_lab_value, quatSQP_lab_gradient, quatSQP_lab_hessian,\
#      py_lik, residue_statistics, residue_statistics_lab, set_labels, image_angle_error, image_angle_error_lab
from camori_aux import *

### In this module we create a list of edgels from the image, and all
### calculations are based on it, instead of sweeping the image all
### the time.


## Colors used for plotting different labels (directions)
dir_colors=['#ea6949', '#51c373', '#a370ff', '#444444']


#####################################################################
## This function measures the distance from point p3 to a line
## parameterized by the two points p1 and p2.
def dist_pt_lin(p3,p1,p2):
  u =  ( (p3[0]-p1[:,0])*(p2[:,0]-p1[:,0]) + (p3[1]-p1[:,1])*(p2[:,1]-p1[:,1]) ) \
      / ( (p2[:,0]-p1[:,0])*(p2[:,0]-p1[:,0]) + (p2[:,1]-p1[:,1])*(p2[:,1]-p1[:,1]) )

  return (p3[0]-( p1[:,0]+u*(p2[:,0]-p1[:,0]) ))**2+(p3[1]-( p1[:,1]+u*(p2[:,1]-p1[:,1]) ))**2
##
#####################################################################

#####################################################################
## Estimates landmark location as point that minimizes distance to set
## of lines.
def estimate_landmark(xini,p1, p2):
  def v_fun(x, p1,p2):
    return np.sum( dist_pt_lin(x,p1,p2) )
    ## Utilizando erro absoluto... Parece que a otimização não convergiu muito bem.
    # return np.sum( np.sqrt(dist_pt_lin(x,p1,p2)) ) 
  ropt = fmin(v_fun, xini, args=(p1,p2,), \
           maxiter=1000, full_output=True, disp=False)
  return ropt
##
#####################################################################


#####################################################################
## Mahalanobis distance calculation for two colors.
def color_distance(Sinv, c1, c2):
    pdelta_ = c1 - c2
    ## The Mahalanobis distance. Should actually be the sqrt of that.
    return np.dot(np.dot(pdelta_,Sinv),pdelta_)
##
#####################################################################

#####################################################################
## Convert color triplet from RGB to xyY.
def rgb_to_xyy(rgb):
  bfd_cone = np.array(
    [[ 0.8951, 0.2664, -0.1614 ],
     [-0.7502, 1.7135, 0.0367 ],
     [ 0.0389, -0.0685, 1.0296 ] ])
  XYZ = np.dot(bfd_cone, rgb/255.)
  xyY = np.array( [ XYZ[0]/XYZ.sum(), XYZ[1]/XYZ.sum(), XYZ[1] ])
  return xyY
##
#####################################################################



#####################################################################
## This is a "fundamental" procedure to extract edgels from an
## image. A very quick version is desired. Edgels are detected over a
## line of the image, and then a subpixel estimate of the edge
## location is calculated.
def edgels_sweep_lines_subpixel(gradx, grady, gstep, glim, direction='H'):
  output = []

  if direction=='H':
    lini = 5+((gradx.shape[0]-11)%gstep)/2
    lend = gradx.shape[0]-5
  elif direction=='V':
    lini = 5+((gradx.shape[1]-11)%gstep)/2
    lend = gradx.shape[1]-5

  for l in range(lini,lend,gstep):
    if direction == 'H':
      mygradx = np.copy(gradx[l,:,:])
      mygrady = np.copy(grady[l,:,:])
    elif direction == 'V':
      mygradx = np.copy(scipy.transpose(grady, axes=(1,0,2))[l,:,:])
      mygrady = np.copy(scipy.transpose(gradx, axes=(1,0,2))[l,:,:])
      

    for cc in range(3):
      neg=mygradx[:,cc]<0
      mygradx[neg,cc] = -mygradx[neg,cc]
      mygrady[neg,cc] = -mygrady[neg,cc]

    mygradx = np.sum(mygradx,1)#/3
    mygrady = np.sum(mygrady,1)#/3

    ## Gradient norms
    gn = (mygradx**2+mygrady**2)**0.5

    onde = 1+np.flatnonzero(  ( abs(mygradx[1:-1])>abs(mygrady[1:-1])) * \
               (gn[ :-2]<gn[1:-1]) *\
               (gn[2: ]<=gn[1:-1])  *\
               (gn[1:-1]>glim ) )
    for k in onde:
      ## Calculate the sub-pixel "correction" of the edgel location,
      ## location of local maxima obtained from interpolation
      c = 0.5 * (gn[k-1] - gn[k+1]) / (gn[k+1] + gn[k-1] - 2 * gn[k])

      ## This would be used for safety only, shouldn't be necessary.
      # c = min(c,.5)
      # c = max(c,-.5)

      ## Find interpolated gradient values
      dxt=mygradx[k] + np.abs(c) * (mygradx[k+np.sign(c)]-mygradx[k]  )
      dyt=mygrady[k] + np.abs(c) * (mygrady[k+np.sign(c)]-mygrady[k]  )
      nf = (dxt**2+dyt**2)**-0.5
      dxt *= nf
      dyt *= nf
      # dxt=mygradx[k]
      # dyt=mygrady[k]
      nf = (dxt**2+dyt**2)**-0.5
      dxt *= nf
      dyt *= nf
      if direction=='H':
        output.append( (k+c,l,dxt,dyt, ) )
      elif direction=='V':
        output.append( (l,k+c,dyt,dxt, ) )
  return output
##
#####################################################################

#####################################################################
## This is a "fundamental" procedure to extract edgels from an image. A
## very quick version is desired.
def edgels_sweep_lines(gradx, grady, gstep, glim, direction='H'):
  output = []

  if direction=='H':
    lini = 5+((gradx.shape[0]-11)%gstep)/2
    lend = gradx.shape[0]-5
  elif direction=='V':
    lini = 5+((gradx.shape[1]-11)%gstep)/2
    lend = gradx.shape[1]-5

  for l in range(lini,lend,gstep):
    if direction == 'H':
      mygradx = np.copy(gradx[l,:,:])
      mygrady = np.copy(grady[l,:,:])
    elif direction == 'V':
      mygradx = np.copy(scipy.transpose(grady, axes=(1,0,2))[l,:,:])
      mygrady = np.copy(scipy.transpose(gradx, axes=(1,0,2))[l,:,:])
      

    for cc in range(3):
      neg=mygradx[:,cc]<0
      mygradx[neg,cc] = -mygradx[neg,cc]
      mygrady[neg,cc] = -mygrady[neg,cc]

    mygradx = np.sum(mygradx,1)
    mygrady = np.sum(mygrady,1)

    ## Gradient norms
    gn = (mygradx**2+mygrady**2)**0.5

    onde = 1+np.flatnonzero(  ( abs(mygradx[1:-1])>abs(mygrady[1:-1])) * \
               (gn[ :-2]<gn[1:-1]) *\
               (gn[2: ]<=gn[1:-1])  *\
               (gn[1:-1]>glim ) )
    for k in onde:
      ## Find interpolated gradient values
      dxt=mygradx[k]
      dyt=mygrady[k]
      nf = (dxt**2+dyt**2)**-0.5
      dxt *= nf
      dyt *= nf
      if direction=='H':
        output.append( (k,l,dxt,dyt, ) )
      elif direction=='V':
        output.append( (l,k,dyt,dxt, ) )
  return output
##
#####################################################################



#####################################################################
## This is a "fundamental" procedure to extract edgels from an
## image. A very quick version is desired.
def edgels_sweep_lines_zernike(gradx, grady, A20, gstep, glim, direction='H'):
  output = []

  Nw = 7
  if direction=='H':
    lini = Nw/2+(gradx.shape[0]/2-Nw/2)%gstep
    lend = gradx.shape[0] - Nw/2
    #lini = Nw+((gradx.shape[0]-(2*Nw+1))%gstep)/2
    #lend = gradx.shape[0]-Nw
  elif direction=='V':
    lini = Nw/2+(gradx.shape[1]/2-Nw/2)%gstep
    lend = gradx.shape[1] - Nw/2
    # lini = Nw+((gradx.shape[1]-(2*Nw+1))%gstep)/2
    # lend = gradx.shape[1]-Nw

  for l in range(lini,lend,gstep):
    if direction == 'H':
      mygradx = np.copy(gradx[l,:,:])
      mygrady = np.copy(grady[l,:,:])
      myA20 = np.copy(A20[l,:,:])
    elif direction == 'V':
      mygradx = np.copy(scipy.transpose(grady, axes=(1,0,2))[l,:,:])
      mygrady = np.copy(scipy.transpose(gradx, axes=(1,0,2))[l,:,:])
      myA20 = np.copy(scipy.transpose(A20, axes=(1,0,2))[l,:,:])

    for cc in range(3):
      neg=mygradx[:,cc]<0
      mygradx[neg,cc] = -mygradx[neg,cc]
      mygrady[neg,cc] = -mygrady[neg,cc]
      myA20[neg,cc] = -myA20[neg,cc]

    ## Take the mean value from the 3 channels.
    mygradx = np.sum(mygradx,1)#/3
    mygrady = np.sum(mygrady,1)#/3
    myA20 = np.sum(myA20,1)#/3

    ## Gradient norms from whole line
    amp = (mygradx**2+mygrady**2)**0.5

    ## The Canny-detected edgels, points that point to the sweeping
    ## direction, and that are local maxima of gradient norm in the
    ## sweeping direction.
    onde = 1+np.flatnonzero(  ( abs(mygradx[1:-1])>abs(mygrady[1:-1])) * \
               (amp[ :-2]<amp[1:-1]) *\
               (amp[2: ]<=amp[1:-1])  *\
               (amp[1:-1]>glim ) )
    for k in onde:
      ## The edgel gradient direction (normalized vector in the
      ## orthogonal direction form the edge)
      dxt = mygradx[k] / amp[k]
      dyt = mygrady[k] / amp[k]

      ## "Zernike distance" from the window centers to the extracted
      ## edge, in the direction of the gradient.
      # zd = 3.5 * myA20[k]/amp[k]
      zd = myA20[k]/amp[k]
      ## Displacement in the sweeping direction.
      c = zd*(dyt**2/dxt + dxt)
      # if np.abs(c) > .75:
      #   continue

      dxn = dxt if dxt>0 else -dxt
      dyn = dyt if dxt>0 else -dyt

      if direction=='H':
        output.append( (k+c,l,dxn,dyn, ) )
      elif direction=='V':
        output.append( (l,k+c,dyn,dxn, ) )
  return output
##
#####################################################################







#####################################################################
## Takes a matrix, the multiplication of a "solution" and
## "estimate.T", and do 90 degree rotations and sign changes until it
## looks like an identity matrix, then measure the angle of the
## rotation by looking at the eigenvalues.
def measure_error(RR):
  order = [0,1,2]
  R = np.abs(RR)
  for k in range(3):
    l = np.flatnonzero( R[k,:]==np.max(R[k,:]) )
    if order[k] != l:
      order[l], order[k] = order[k], order[l]
  ## The "normalized" matrix
  out = np.zeros((3,3))
  for k in range(3):
    l=order[k]
    out[l,:] = RR[k,:]
    if out[l,l]<0:
      out[l,:]*=-1
  # The imaginary part of the non 1 eigenvalues is the sin of the
  # angle.
  v,l = scipy.linalg.eig(out)
  return np.max(np.imag(v))
##
#####################################################################




#####################################################################
def quaternion_product(a,b):
  return (
    a[0]*b[0] - a[1]*b[1] - a[2]*b[2]-a[3]*b[3],
    a[0]*b[1] + a[1]*b[0] - a[2]*b[3]+a[3]*b[2],
    a[0]*b[2] + a[1]*b[3] + a[2]*b[0]-a[3]*b[1],
    a[0]*b[3] - a[1]*b[2] + a[2]*b[1]+a[3]*b[0]
    )

def quaternion_complete(a):
  return (np.sqrt(1-(a[0]**2+a[1]**2+a[2]**2)), a[0], a[1], a[2])

def quaternion_product_c(a,b):
  return quaternion_product(quaternion_complete(a), quaternion_complete(b))

  

#####################################################################
## This gets the tree last parameters from the quaternion and maps it
## into values that make sense. Either just to the unit sphere, or
## eliminating the symmetries...
def fix_quaternion_parameters2(myx):
  xb,xc,xd = myx
  xnormsq = xb*xb+xc*xc+xd*xd

  if xnormsq < 1:
    ## If inside the unit sphere, these are the components
    ## themselves, and we just have to calculate a to normalize
    ## the quaternion.
    b,c,d = xb,xc,xd
    a = np.sqrt(1-xnormsq)
  else:
    ## Just to work gracefully if we have invalid inputs, we
    ## reflect the vectors outside the unit sphere to the other
    ## side, and use the inverse norm and negative values of a.
    b,c,d = -xb/xnormsq,-xc/xnormsq,-xd/xnormsq
    a = -np.sqrt(1- 1.0/xnormsq )
    ## This should not be used, it defeats the whole concept that
    ## small (b,c,d) vector norms have small rotation angles. It's
    ## really just to let us work near the borders of the
    ## sphere. Any optimization algorithm should work with
    ## initalizations just inside the spere, and avoid wandering
    ## outside of it.

  assert a >= -1
  assert a <= 1
  
  return a,b,c,d
##
#####################################################################

#####################################################################
## This gets the tree last parameters from the quaternion and maps it
## into values that make sense. Either just to the unit sphere, or
## eliminating the symmetries...
def fix_quaternion_parameters(myx):
  xb,xc,xd = myx
  xnormsq = xb*xb+xc*xc+xd*xd

  if xnormsq < 1:
    ## If inside the unit sphere, these are the components
    ## themselves, and we just have to calculate a to normalize
    ## the quaternion.
    b,c,d = xb,xc,xd
    a = np.sqrt(1-xnormsq)
  else:
    ## Just to work gracefully if we have invalid inputs, we
    ## reflect the vectors outside the unit sphere to the other
    ## side, and use the inverse norm and negative values of a.
    b,c,d = -xb/xnormsq,-xc/xnormsq,-xd/xnormsq
    a = -np.sqrt(1- 1.0/xnormsq )
    ## This should not be used, it defeats the whole concept that
    ## small (b,c,d) vector norms have small rotation angles. It's
    ## really just to let us work near the borders of the
    ## sphere. Any optimization algorithm should work with
    ## initalizations just inside the spere, and avoid wandering
    ## outside of it.

  assert a >= -1
  assert a <= 1

  ## Here we take in consideration only small rotations around y
  ## axis, from 0 to 90 degrees.
  # if c>=0.0 and (np.abs(c)<0.70711) and (a > 0.8):
  ## This is a more general case, just limit somewhat around y, and to
  ## the left or right.
  if (np.abs(c)<0.70711):
    return a,b,c,d

  ## Else we look for alternatives, removing 180 and 90 degrees from the estimated rotation.
  rots = np.array([ [ 0,0,-1,0 ],[cos(-.25*pi),0,sin(-.25*pi),0 ],
           [ 1,0,0,0 ],[cos(.25*pi),0,sin(.25*pi),0 ],
           [ 0,0,1,0 ]           ])

  # rots = np.array([ [ 0,0,-1,0 ],
  #          [1,0,0,0 ],
  #          [0,0,1,0 ]
  #          ])
  alts = np.array([quaternion_product( rr, (a,b,c,d)) for rr in rots])
  who = np.flatnonzero((alts[:,2]>=0) * (np.abs(alts[:,2])<0.70711) * (alts[:,0]>0.8))
  if len(who):
    return alts[who[0]]
  else:
    # print 'SOCORRO!!!'
    # print (a,b,c,d)
    return (a,b,c,d)
##
#####################################################################



#####################################################################
## Produces a rotation matrix from the 3 last components of a
## quaternion.
def quaternion_to_matrix(myx):
  '''Converts from a quaternion representation (last 3 values) to rotation matrix.'''

  a,b,c,d = fix_quaternion_parameters(myx)

  ## Notice we return a transpose matrix, because we work with line-vectors
  return np.array([ [(a*a+b*b-c*c-d*d), (2*b*c-2*a*d),   (2*b*d+2*a*c)   ],
             [(2*b*c+2*a*d),   (a*a-b*b+c*c-d*d), (2*c*d-2*a*b)   ],
             [(2*b*d-2*a*c),   (2*c*d+2*a*b),   (a*a-b*b-c*c+d*d)] ] ).T \
             / (a*a+b*b+c*c+d*d)
##
#####################################################################






## Conic constraint
def val_c(x):
    return np.linalg.norm(x)
def grad_c(x):
    return x/np.linalg.norm(x)
def hess_c(x):
    nx = np.linalg.norm(x)
    return (nx**2 * np.identity(x.shape[0]) - np.outer(x,x)) / nx**3

## Target function
def val_f(x,*fargs):
    return quatSQP_value(x,fargs[0],fargs[2],fargs[3],fargs[4])\
           - quatSQP_value(x,fargs[1],fargs[2],fargs[3],fargs[4])
def grad_f(x,*fargs):
    return quatSQP_gradient(x,fargs[0],fargs[2],fargs[3],fargs[4])\
           - quatSQP_gradient(x,fargs[1],fargs[2],fargs[3],fargs[4])
def hess_f(x,*fargs):
    return quatSQP_hessian(x,fargs[0],fargs[2],fargs[3],fargs[4])\
           - quatSQP_hessian(x,fargs[1],fargs[2],fargs[3],fargs[4])

## Target function with fixed labels
def val_f2(x,*fargs):
    return quatSQP_lab_value(x,fargs[0],fargs[2],fargs[3],fargs[4],fargs[5])\
           - quatSQP_lab_value(x,fargs[1],fargs[2],fargs[3],fargs[4],fargs[5])
def grad_f2(x,*fargs):
    return quatSQP_lab_gradient(x,fargs[0],fargs[2],fargs[3],fargs[4],fargs[5])\
           - quatSQP_lab_gradient(x,fargs[1],fargs[2],fargs[3],fargs[4],fargs[5])
def hess_f2(x,*fargs):
    return quatSQP_lab_hessian(x,fargs[0],fargs[2],fargs[3],fargs[4],fargs[5])\
           - quatSQP_lab_hessian(x,fargs[1],fargs[2],fargs[3],fargs[4],fargs[5])

## Target function without de-biasing
def val_f3(x,*fargs):
  return quatSQP_value(x,fargs[0],fargs[1],fargs[2],fargs[3])
def grad_f3(x,*fargs):
  return quatSQP_gradient(x,fargs[0],fargs[1],fargs[2],fargs[3])
def hess_f3(x,*fargs):
  return quatSQP_hessian(x,fargs[0],fargs[1],fargs[2],fargs[3])

## Target function with fixed labels and without de-biasing
def val_f4(x,*fargs):
  return quatSQP_lab_value(x,fargs[0],fargs[1],fargs[2],fargs[3],fargs[4])
def grad_f4(x,*fargs):
  return quatSQP_lab_gradient(x,fargs[0],fargs[1],fargs[2],fargs[3],fargs[4])
def hess_f4(x,*fargs):
  return quatSQP_lab_hessian(x,fargs[0],fargs[1],fargs[2],fargs[3],fargs[4])

class Picture:
  ## Directional derivative filters
  sobel_filter=scipy.array([
      [-1, 0, 1],
      [-2, 0, 2],
      [-1, 0, 1] ])/8.0

  scharr_filter=scipy.array([
      [ -3, 0, 3],
      [-10, 0,10],
      [ -3, 0, 3] ])/32.0

  ## Directional derivative filter
  shigeru_filter=-scipy.array([
      [ -0.003776, -0.010199, 0., 0.010199, 0.003776 ],
      [ -0.026786, -0.070844, 0., 0.070844, 0.026786 ],
      [ -0.046548, -0.122572, 0., 0.122572, 0.046548 ],
      [ -0.026786, -0.070844, 0., 0.070844, 0.026786 ],
      [ -0.003776, -0.010199, 0., 0.010199, 0.003776 ]
      ])

  zernike_V11_5x5 = np.array([
      [ -146.67,  -468.68,     0.  ,   468.68,   146.67],
      [ -933.33,  -640.  ,     0.  ,   640.  ,   933.33],
      [-1253.33,  -640.  ,     0.  ,   640.  ,  1253.33],
      [ -933.33,  -640.  ,     0.  ,   640.  ,   933.33],
      [ -146.67,  -468.68,     0.  ,   468.68,   146.67]])/19368.05


  zernike_V11_7x7 = np.array([
      [   0.  , -150.46, -190.2 ,    0.  ,  190.2 ,  150.46,    0.  ],
      [-224.25, -465.74, -233.24,    0.  ,  233.24,  465.74,  224.25],
      [-573.37, -466.47, -233.24,    0.  ,  233.24,  466.47,  573.37],
      [-689.99, -466.47, -233.24,    0.  ,  233.24,  466.47,  689.99],
      [-573.37, -466.47, -233.24,    0.  ,  233.24,  466.47,  573.37],
      [-224.25, -465.74, -233.24,    0.  ,  233.24,  465.74,  224.25],
      [   0.  , -150.46, -190.2 ,    0.  ,  190.2 ,  150.46,    0.  ]])/27314.0


  zernike_V11_9x9 = np.array([[ 0., -11.73, -109.17, -94.2, 0., 94.2, 109.17,
                             11.73, 0. ],
                           [ -16.09, -253.68, -219.48, -109.74, 0., 109.74, 219.48,
                              253.68, 16.09],
                           [-214.91, -329.22, -219.48, -109.74, 0., 109.74, 219.48,
                             329.22, 214.91],
                           [-379.52, -329.22, -219.48, -109.74, 0., 109.74, 219.48,
                             329.22, 379.52],
                           [-434.39, -329.22, -219.48, -109.74, 0., 109.74, 219.48,
                             329.22, 434.39],
                           [-379.52, -329.22, -219.48, -109.74, 0., 109.74, 219.48,
                             329.22, 379.52],
                           [-214.91, -329.22, -219.48, -109.74, 0., 109.74, 219.48,
                             329.22, 214.91],
                           [ -16.09, -253.68, -219.48, -109.74, 0., 109.74, 219.48,
                              253.68, 16.09],
                           [ 0., -11.73, -109.17, -94.2, 0., 94.2, 109.17,
                             11.73, 0. ]]) / 35236.70736

  zernike_V20_5x5 = 2.5 * np.array([
      [  176.  ,   595.07,   505.86,   595.07,   176.  ],
      [  595.07,  -490.67, -1002.67,  -490.67,   595.07],
      [  505.86, -1002.67, -1514.67, -1002.67,   505.86],
      [  595.07,  -490.67, -1002.67,  -490.67,   595.07],
      [  176.  ,   595.07,   505.86,   595.07,   176.  ]])/19368.05


  zernike_V20_7x7 = 3.5 * np.array([
      [   0.  ,  224.66,  393.73,  395.52,  393.73,  224.66,    0.  ],
      [ 224.66,  271.06, -127.72, -261.  , -127.72,  271.06,  224.66],
      [ 393.73, -127.72, -527.56, -660.84, -527.56, -127.72,  393.73],
      [ 395.52, -261.  , -660.84, -794.11, -660.84, -261.  ,  395.52],
      [ 393.73, -127.72, -527.56, -660.84, -527.56, -127.72,  393.73],
      [ 224.66,  271.06, -127.72, -261.  , -127.72,  271.06,  224.66],
      [   0.  ,  224.66,  393.73,  395.52,  393.73,  224.66,    0.  ]])/27314.0

  zernike_V20_9x9 = 4.5 * np.array([[ 0. , 19.03, 200.93, 278.78, 290.06, 278.78, 200.93,
                             19.03, 0. ],
                           [ 19.03, 274.72, 148.35, 2.03, -46.74, 2.03, 148.35,
                             274.72, 19.03],
                           [ 200.93, 148.35, -95.51, -241.83, -290.61, -241.83, -95.51,
                             148.35, 200.93],
                           [ 278.78, 2.03, -241.83, -388.15, -436.93, -388.15, -241.83,
                             2.03, 278.78],
                           [ 290.06, -46.74, -290.61, -436.93, -485.7 , -436.93, -290.61,
                             -46.74, 290.06],
                           [ 278.78, 2.03, -241.83, -388.15, -436.93, -388.15, -241.83,
                             2.03, 278.78],
                           [ 200.93, 148.35, -95.51, -241.83, -290.61, -241.83, -95.51,
                             148.35, 200.93],
                           [ 19.03, 274.72, 148.35, 2.03, -46.74, 2.03, 148.35,
                             274.72, 19.03],
                           [ 0. , 19.03, 200.93, 278.78, 290.06, 278.78, 200.93,
                             19.03, 0. ]])/ 35236.70736

  def __init__(self, frame):
    try:
      # self.frame = scipy.array( pylab.flipud(pylab.imread(filename)), dtype=float)
      self.frame = frame
    except IOError:
      print "SOCORRO!, File not found"
      print filename
      self=None
      raise Exception('File not found')

    self.grid_inc = 1 ## The increment when using a grid...

    ## Image dimensions
    self.Iheight, self.Iwidth = self.frame.shape[:2]

    self.edgels = []
    self.new_labels = []
    self.rect_edgels = []



  def extract_edgels(self, gstep, glim, method=0):
    if method == 0:
      self.derivative_filter = self.sobel_filter
    elif method == 1:
      self.derivative_filter = self.scharr_filter
    elif method == 2:
      self.derivative_filter = self.shigeru_filter
    elif method == 3:
      self.derivative_filter = self.zernike_V11_5x5
      self.laplacean_filter = self.zernike_V20_5x5
    elif method == 4:
      self.derivative_filter = self.zernike_V11_7x7
      self.laplacean_filter = self.zernike_V20_7x7
    elif method == 5:
      self.derivative_filter = self.zernike_V11_9x9
      self.laplacean_filter = self.zernike_V20_9x9
    else:
      raise 'Invalid edgel extraction method.'
      
      
    ## Calculate gradients
    self.gradx = scipy.zeros(self.frame.shape, dtype=np.float32)
    self.grady = scipy.zeros(self.frame.shape, dtype=np.float32)
    for c in range(3):
      scipy.ndimage.convolve(self.frame[:,:,c], self.derivative_filter,  self.gradx[:,:,c] )
      scipy.ndimage.convolve(self.frame[:,:,c], self.derivative_filter.T, self.grady[:,:,c] )

    if method >= 3 and method <= 5:
      ## Calculate laplacean
      self.A20 = scipy.zeros(self.frame.shape, dtype=np.float32)
      for c in range(3):
        scipy.ndimage.convolve(self.frame[:,:,c], self.laplacean_filter, self.A20[:,:,c] )

    ## Run edgel extraction procedure using the calculated gradients
    if method >= 0 and method < 3:
      self.edgels = cython_edgel_extractor(gstep, glim, self.gradx, self.grady)
    elif method >= 3 and method <= 5:
      self.edgels = cython_edgel_extractor_zernike(gstep, glim, self.gradx,
                                                   self.grady, self.A20)
    else:
      raise 'Invalid edgel extraction method.'

    ## Store number of edgels
    self.Ned = self.edgels.shape[0]

    ## Array that contains the label of each edgel
    self.labels = np.ascontiguousarray( np.zeros(len(self.edgels), dtype=np.int32) )


  def decimate_edgels(self, dec_t, dec_d, dec_l=2, method=0):
    kdt = scipy.spatial.cKDTree( self.edgels[:,:2] )
    Nned = 0
    esel = np.zeros(self.Ned, dtype=int)
    ## If we "gotta catch them all", we just count for each edgel how
    ## many neighbours it has. But the neighborhood doesnp t take just
    ## in account the dstance, that is what the kdt uses to give us
    ## the preliminary neighbours list. We also look at the angular
    ## error to further discard some of these initial neighboring
    ## edgels.
    if method == 0:
      count = np.zeros(self.Ned, dtype=int)
      for n in xrange(self.Ned):
        ## This edgel already "passed".
        if count[n] >= dec_l:
          continue
        dist, idx = kdt.query(self.edgels[n,:2], 20, eps=0, p=1, distance_upper_bound=dec_d)
        hits = 0
        for mm in xrange(idx.shape[0]):
          d = dist[mm]
          m = idx[mm]
          ## Discard the last position of the output array from the query.
          if m == self.Ned:
            break
          ## Calculate the angular error 
          err_a, err_d = directional_error(self.edgels, m, n)
          if err_a < dec_t:
            count[m] += 1
            count[n] += 1
            if count[n] >= dec_l:
              break
        ## templates now contains hoe many distance+angule neighbors each
        ## edgel has, and we can use this to discard "lonely", edgels with
        ## few similar ones around.
      esel = np.nonzero(count >= dec_l)[0]

    ## This alternative is in case we want to perform a
    ## homogeneization of the density of observations. We discard
    ## edgels that already have a similar one in the output list.
    elif method == 1:
      templates = {}    
      for n in range(0,self.Ned):
        dist, idx = kdt.query(self.edgels[n,:2], 20, eps=0, p=1, distance_upper_bound=dec_d)
        hits = 0
        for mm in xrange(idx.shape[0]):
          d = dist[mm]
          m = idx[mm]
          ## Discard the last position of the output array from the query.
          if m == self.Ned:
            break
          ## We only count hits in the edgels already in the templates list.
          if not m in templates:
            continue
          err_a, err_d = directional_error(self.edgels, m, n)
          if err_a < dec_t:
            ## strengthen existing neighbor templates
            templates[m] += 1
            hits += 1
        ## No hits, this can become a new template. We might use
        ## "hits" here, but I am not sre exactly how right
        ## now. Better to start with 0 and let new edgels vote for
        ## it.
        if hits == 0:
          templates[n] = 0
      ## templates now contain a list of edgels that "dominate" it
      ## own regions, no two templates are close to each other, and
      ## their counts gve an estimate of how many of the original
      ## observations are similar to it. Now we can discard the
      ## weaker observations, like previously, bu we also
      ## avoidhaving too many instances of very similar
      ## observations.
      esel = [k for k in templates if templates[k] >= dec_l]      

    ## This method uses a similarity measurement based on the
    ## interpretation plane normals. More suited for very distorted
    ## projections, and should probably be better for very wide grids,
    ## comparing very distant edgels.
    if method == 2:
      count = np.zeros(self.Ned, dtype=int)
      for n in xrange(self.Ned):
        ## This edgel already "passed".
        if count[n] >= dec_l:
          continue
        dist, idx = kdt.query(self.edgels[n,:2], 20, eps=0, p=1, distance_upper_bound=dec_d)
        hits = 0
        for mm in xrange(idx.shape[0]):
          d = dist[mm]
          m = idx[mm]
          ## Discard the last position of the output array from the query.
          if m == self.Ned:
            break
          ## Calculate the angular error 
          err_a = interpretation_plane_error(self.normals, m, n)
          if err_a < dec_t:
            count[m] += 1
            count[n] += 1
            if count[n] >= dec_l:
              break
        ## templates now contains hoe many distance+angule neighbors each
        ## edgel has, and we can use this to discard "lonely", edgels with
        ## few similar ones around.
      esel = np.nonzero(count >= dec_l)[0]


    ## Now we pick up the final list of output edgels, discarding
    ## "weak" observations.
    self.edgels = self.edgels[esel]
    self.Ned = self.edgels.shape[0]






  def calculate_edgel_normals(self):
    raise NotImplementedError

  def old_find_orientation(self, xini):
    def v_fun(x, *args):
      ## Get the rotation matrix
      rotM = quaternion_to_matrix( x )
      ## Call the calculation method
      return self.calculate_MAP(rotM)

    #################################################################
    ## Execute the Simplex optimization to estimate orientation
    ## from the initial estimate xini

    ## Powell minimization
    # ropt = fmin_powell(v_fun, xini, xtol=1e-9, ftol=1e-9,
    #       maxiter=10000, full_output=True, disp=False)
    ## Simplex optimization
    ## Default xtol and ftol are 1e-4
    print xini
    ropt = fmin(v_fun, xini, xtol=1e-9, ftol=1e-9,
          maxiter=10000, full_output=True, disp=False)
    sol=ropt[0]
    sol = fix_quaternion_parameters(ropt[0])[1:]
    # self.orientation = np.copy(sol)
    ropt=list(ropt)
    ropt[0]=np.copy(sol)#self.orientation)
    ##
    #################################################################

    return ropt

  def set_labels(self, sig):

    if self.new_labels == []:
      self.new_labels = np.zeros(self.edgels.shape[0], dtype=np.int32)
    isig = 1./sig
    fp_bw = (isig, 3, 1.5) ## 3.44 recommended in principle.
    set_labels(self.orientation.q, self.normals, self.new_labels, *fp_bw)

  def find_orientation(self, xini=[], in_sig=0.0, method=0):
    """Estimate the camera orientation from the edgel normals via
    filterSQP optimization.

    xini -- optional list of initial quaternion guesses (defaults to
        filterSQP.initial_guesses).  NOTE(review): mutable default
        argument; safe here because it is never mutated.
    in_sig -- fixed scale for the robust loss function; 0.0 means
        "estimate it from the median residue of the first step".
    method -- 0: two-step (absolute loss, then Tukey's bisquare),
        returns (Quat, scale estimate);
        1: Tukey's bisquare only, over all initial guesses (needs
        in_sig), returns (Quat, smallest error);
        2: label-aware Tukey's bisquare without de-biasing (needs
        in_sig and xini), returns (Quat, error);
        3: label-aware two-step variant, returns (Quat, scale estimate).

    Raises Exception on a wrong method number or a missing in_sig.
    """
    if method == 0:
      ##################################################################
      ## This is the two-step method, first using absolute error,
      ## then Tukey's bisquare.
      sqp_funcs = (val_c, grad_c, hess_c, val_f, grad_f, hess_f)

      ## First step
      ##
      ## Set the parameters for the loss function
      fp_l1 = (1., 1, 0.) ## Absolute, linear loss function
      args_f = (self.normals,self.rand_normals) + fp_l1
      minerr = np.inf
      ## Set the list of initial points to test, then pick the result
      ## with the smallest error overall.
      ini_L = xini if xini!= [] else filterSQP.initial_guesses
      for nk,xi in enumerate(ini_L):
        xo, err = filterSQP.filterSQP(Quat(xi).q,.0,.1,sqp_funcs,args_f,delta_tol=1e-7)
        residues = np.sort(residue_statistics(xo, self.normals, *fp_l1))
        if err <= minerr:
          ## Keep the best solution and its median residue (Python 2
          ## integer division picks the middle element).
          minerr, sol_s1, median_err = err, xo, residues[residues.shape[0]/2]

      ## Second step
      ##
      ## Set the scale factor from the step 1 residue, or from the
      ## input argument.  Dividing the median absolute residue by
      ## 0.675 turns it into a (robust) Gaussian sigma estimate.
      sig_hat = in_sig if in_sig else median_err/0.675
      isig = 1./sig_hat

      ## Parameters for Tukey's biweight function
      fp_bw = (isig, 3, 1.5) ## 3.44 recommended in principle.
      args_f = (self.normals,self.rand_normals) + fp_bw
      sol, err_bw = filterSQP.filterSQP(sol_s1,0.0,0.1,sqp_funcs,args_f,delta_tol=1e-15)

      return Quat(sol), sig_hat

    elif method == 1 and in_sig != 0.0:
      ################################################################
      ## This is just the "second step", Tukey's bisquare

      ## Set the parameters for the loss function
      sig_hat = in_sig
      isig = 1./sig_hat
      fp_bw = (isig, 3, 1.5)
      args_f = (self.normals,self.rand_normals) + fp_bw

      minerr = np.inf
      ini_L = xini if xini!= [] else filterSQP.initial_guesses
      sqp_funcs = (val_c, grad_c, hess_c, val_f, grad_f, hess_f)
      for nk,xi in enumerate(ini_L):
        xo, err = filterSQP.filterSQP(Quat(xi).q,.0,.1,sqp_funcs,args_f,delta_tol=1e-15)
        if err <= minerr:
          minerr, sol = err, xo
      return Quat(sol), minerr

    elif method == 2 and in_sig != 0.0:
      ## Set the parameters for the loss function
      sig_hat = in_sig
      isig = 1./sig_hat
      fp_bw = (isig, 3, 1.5)
      args_f = (self.normals,self.rand_normals,self.new_labels) + fp_bw

      ## Just a Tukey, without de-biasing.  Uses the label-aware
      ## objective functions (val_f2/grad_f2/hess_f2).
      sqp_funcs = (val_c, grad_c, hess_c, val_f2, grad_f2, hess_f2)
      sol, err = filterSQP.filterSQP(xini,0.,.1,sqp_funcs,args_f,delta_tol=1e-17)
      return Quat(sol), err

    elif method == 3:

      ## Label-aware objective functions (val_f4/grad_f4/hess_f4).
      sqp_funcs = (val_c, grad_c, hess_c, val_f4, grad_f4, hess_f4)

      ## First step: absolute (linear) loss.
      fp_l1 = (1.,1,0)
      args_f = (self.normals,self.new_labels) + fp_l1
      xo, err = filterSQP.filterSQP(xini,0.,.1,sqp_funcs,args_f,delta_tol=1e-7)
      ## Only the edgels with a valid direction label (not 3).
      sel = np.nonzero(self.new_labels!=3)[0]
      residues = np.sort(residue_statistics(xo, self.normals[sel], *fp_l1))
      median_err = residues[residues.shape[0]/2]
      sig_hat = in_sig if in_sig else median_err/0.675

      ## Second step: Tukey's biweight with the estimated scale.
      fp_bw = (1./sig_hat, 3, 1.5)
      args_f = (self.normals,self.new_labels) + fp_bw
      sol, err = filterSQP.filterSQP(xo,0.,.1,sqp_funcs,args_f,delta_tol=1e-17)
      return Quat(sol), sig_hat

    else:
      raise Exception('Missing variance parameters, or wrong method number.')

  def find_vanishing_points(self, in_sig=0.0):
    """Refine each of the three vanishing-point directions separately.

    For each direction label, re-estimate the orientation using only
    the edgels with that label (absolute loss first, then Tukey's
    bisquare with a scale taken from the median residue or from
    in_sig), and read the corresponding column of the resulting
    rotation matrix.

    Returns a (3,3) array with one vanishing-point direction per row.
    """
    sqp_funcs = (val_c, grad_c, hess_c, val_f4, grad_f4, hess_f4)
    ## Set the parameters for the loss function
    fp_l1 = (1., 1, 0.) ## Absolute, linear loss function

    ## Start every per-direction estimation from the current global
    ## orientation estimate.
    xi = self.orientation.q
    vps = np.zeros((3,3))
    for lab in range(3):
      sel = np.nonzero(self.new_labels==lab)[0]
      ## One normal already classified as in one of the other
      ## directions, to keep the solution close and steady.
      ort = self.normals[np.nonzero(self.new_labels==((lab+1)%3))[0]]

      ## Create new arrays with normals and labels for this direction.
      # normals_lab = np.r_[self.normals[sel], ort]
      normals_lab = self.normals[sel]
      labels_lab = np.zeros(normals_lab.shape[0], dtype=np.int32)
      labels_lab[:] = lab
      #labels_lab[-1] = (lab+1)%3
      
      ## First step: absolute loss from the global estimate.
      args_f = (normals_lab,labels_lab) + fp_l1      
      xo, err = filterSQP.filterSQP(xi,.0,.1,sqp_funcs,args_f,delta_tol=1e-7)
      ## Median residue (Python 2 integer division picks the middle).
      residues = np.sort(residue_statistics_lab(xo, normals_lab, labels_lab, *fp_l1))
      median_err = residues[residues.shape[0]/2]      

      ## Second step
      ## 0.675 converts the median absolute residue to a sigma estimate.
      sig_hat = in_sig if in_sig else median_err/0.675
      isig = 1./sig_hat
      ## Parameters for Tukey's biweight function
      fp_bw = (isig, 3, 1.5) ## 3.44 recommended in principle.
      args_f = (normals_lab,labels_lab) + fp_bw
      sol, err_bw = filterSQP.filterSQP(xo,0.0,0.1,sqp_funcs,args_f,delta_tol=1e-15)

      ## The lab-th column of the rotation matrix is the refined
      ## vanishing-point direction.
      vps[lab] = Quat(sol).rot()[:,lab]

    return vps

  def angle_error(self, midx, midy, fd, q, fp):
    """Total angular error of the edgels for orientation q (thin
    wrapper around the external image_angle_error routine).  fp holds
    the loss-function parameters."""
    return image_angle_error(midx, midy, fd, q, self.edgels, *fp)

  def angle_error_res(self, midx, midy, fd, q, fp):
    """Per-edgel angular-error residues for orientation q (wrapper
    around the external image_angle_error_residues routine)."""
    return image_angle_error_residues(midx, midy, fd, q, self.edgels, *fp)

  def angle_labels(self, midx, midy, fd, q, fp):
    """Direction labels assigned to each edgel for orientation q
    (wrapper around the external image_angle_labels routine)."""
    return image_angle_labels(midx, midy, fd, q, self.edgels, *fp)

  def angle_find_orientation(self, xini):
    """Estimate the camera orientation by simplex minimization of the
    image angle error, starting from the quaternion parameters xini.

    Returns the scipy fmin full-output tuple as a list, with the first
    entry replaced by the normalized quaternion parameters of the
    solution.
    """
    def v_fun(x, *args):
      ## Objective: total angular error of the candidate orientation,
      ## using an absolute (linear) loss function.
      q = Quat(x)
      fp = (1., 1, 0)
      return self.angle_error(self.middlex, self.middley, self.fd, q.q, fp)

    ## Simplex optimization from the initial estimate xini.
    ## Default xtol and ftol are 1e-4.
    ## (Parenthesized print is equivalent under Python 2 for a single
    ## argument and also valid Python 3.)
    print(xini)
    ropt = fmin(v_fun, xini, xtol=1e-9, ftol=1e-9,
          maxiter=10000, full_output=True, disp=False)
    ## Re-normalize the optimized parameters into a proper quaternion.
    ## (A dead assignment of the raw ropt[0] was removed here.)
    sol = fix_quaternion_parameters(ropt[0])[1:]
    ropt = list(ropt)
    ropt[0] = np.copy(sol)

    return ropt


  def plot_vdirs(self, ax, spacing, myR):
    """Plot the vanishing-point direction field; implemented by the
    projection-specific subclasses."""
    raise NotImplementedError

  def plot_edgels(self, ax, scale=1):
    """Draw every edgel as a short red segment on the matplotlib axes ax.

    Each segment is centred on the edgel position and aligned with the
    local edge direction; scale controls the segment half-length.
    """
    pos_x = self.edgels[:, [0, 0]]
    pos_y = self.edgels[:, [1, 1]]
    ## Offsets along the edge direction: the gradient (columns 2,3) is
    ## rotated 90 degrees to obtain the edge direction.
    off_x = scale * np.c_[-self.edgels[:, 3], self.edgels[:, 3]]
    off_y = scale * np.c_[-self.edgels[:, 2], self.edgels[:, 2]]
    ax.plot((pos_x - off_x).T, (pos_y + off_y).T, 'r-')

  def plot_edgels_lab(self, ax, scale=1):
    """Draw the edgels as short segments colored by their direction
    label (using the module-level dir_colors palette; label 3 is the
    outlier class)."""
    for lab in range(4):
      chosen = self.edgels[np.nonzero(self.new_labels == lab)[0]]
      dx, dy = chosen[:, 2], chosen[:, 3]
      ax.plot((chosen[:, [0, 0]] - scale * np.c_[-dy, dy]).T,
              (chosen[:, [1, 1]] + scale * np.c_[-dx, dx]).T,
              dir_colors[lab])

  def rectified_edgels(self):
    """Rotate the edgel positions by the current orientation estimate
    and perform the perspective division.

    Returns an array whose first two columns are the rectified
    coordinates and whose third column is the rotated x component.
    """
    rotM = quaternion_to_matrix(self.orientation)
    ## Homogeneous edgel positions, rotated into the world frame.
    homog = np.c_[self.edgels[:, :2], np.ones(self.Ned)]
    rr = np.dot(homog, rotM.T)
    ## Perspective division by the negated x component.
    return np.c_[rr[:, [2, 1]] / -rr[:, [0, 0]], rr[:, 0]]

  def calculate_MAP(self, rotM):
    """MAP error for rotation matrix rotM; implemented by the
    projection-specific subclasses."""
    raise NotImplementedError




class PicturePinhole(Picture):
  '''For perspective transform, "pinhole" cameras.

  Holds the intrinsic parameters (focal distance, principal point and a
  radial distortion coefficient) needed to map image pixels into
  directions in space.'''

  def __init__(self, frame, fd, middlex=None, middley=None, distortion=0.0):

    Picture.__init__(self, frame)

    self.method='nic'

    ## Focal distance (default)
    self.fd = fd

    ## Principal point.  Compare against None explicitly so that a
    ## legitimate coordinate of 0 is not silently replaced by the
    ## image centre.
    self.middlex = middlex if middlex is not None else (self.Iwidth-1)/2.0
    self.middley = middley if middley is not None else (self.Iheight-1)/2.0

    ## Radial distortion coefficient.
    self.distortion = distortion

  def extract_edgels(self, gstep, glim, method=0):
    '''Call the basic class edgel extractor.  The "extrinsic" edgels
    (centred and focal-distance normalized) are computed separately by
    calculate_extrinsic_edgels.'''
    Picture.extract_edgels(self, gstep, glim, method)

  def calculate_extrinsic_edgels(self):
    '''Subtract the principal point from the edgel positions and
    normalize them by the focal distance.'''
    ## np.copy/np.array: the scipy aliases of these numpy functions
    ## were deprecated and later removed from scipy.
    self.extrinsic_edgels = np.copy(self.edgels)
    self.extrinsic_edgels[:,:2] -= np.array([self.middlex, self.middley])
    self.extrinsic_edgels[:,:2] /= self.fd

  def calculate_weighted_edgel_normals(self):
    '''Calculate the normal vector of each edgel, given the intrinsic
    parameters, weighted by the current orientation estimate.  Edgels
    with the outlier label (3) receive a zero normal.'''
    sel = np.nonzero(self.new_labels != 3)[0]
    selno = np.nonzero(self.new_labels == 3)[0]
    px,py,ux,uy = np.array(self.edgels[sel], dtype=np.double).T
    px -= self.middlex
    py -= self.middley
    pz = self.fd
    self.normals[sel] = np.array(np.c_[-pz*ux, -pz*uy, px*ux+py*uy])

    ## One rotation-matrix row per edgel, picked by its label.  The
    ## appended zero row is never selected since label 3 was excluded.
    R = np.r_[self.orientation.rot().T,np.zeros((1,3))]
    r = R[self.new_labels[sel]]

    ## Per-edgel normalization factor from the expected direction.
    rn = r*np.c_[3*[((r**2).sum(1))**-.5]].T
    nf = ((rn[:,2]*px-pz*rn[:,0])**2+(rn[:,2]*py-pz*rn[:,1])**2)**-.5

    self.normals[sel] = self.normals[sel] * np.c_[3*[nf]].T
    self.normals[selno] = 0

  def calculate_edgel_normals(self):
    '''Calculate the unit normal vector of each edgel, given the
    intrinsic parameters.'''
    px,py,ux,uy = np.array(self.edgels, dtype=np.double).T
    px -= self.middlex
    py -= self.middley
    pz = self.fd
    self.normals = np.array(np.c_[-pz*ux, -pz*uy, px*ux+py*uy])
    self.normals = self.normals / np.c_[3*[np.sqrt((self.normals**2).sum(1))]].T

  def calculate_edgel_normals_harris(self):
    '''Normal vectors computed by the external harris_normals routine,
    which also accounts for the radial distortion coefficient.'''
    self.normals = harris_normals(self.middlex, self.middley, self.fd, self.distortion,
                                        self.edgels)

  def calculate_random_normals_harris(self):
    '''Same as calculate_edgel_normals_harris, over the random edgels.'''
    self.rand_normals = harris_normals(self.middlex, self.middley, self.fd, self.distortion,
                                        self.random_edgels)

  def generate_random_edgels(self):
    '''Copy the edgels, replacing each direction with a random unit
    vector (used to build a reference for the error statistics).'''
    self.random_edgels = np.copy(self.edgels)
    ux, uy = np.random.rand(2,(self.edgels.shape[0]))
    nf = (ux**2+uy**2)**-.5
    self.random_edgels[:,2] = ux*nf
    self.random_edgels[:,3] = uy*nf

  def calculate_random_normals(self):
    '''Unit normal vectors of the random edgels, given the intrinsic
    parameters.'''
    px,py,ux,uy = np.array(self.random_edgels, dtype=np.double).T
    px -= self.middlex
    py -= self.middley
    pz = self.fd
    self.rand_normals = np.array(np.c_[-pz*ux, -pz*uy, px*ux+py*uy])
    self.rand_normals = self.rand_normals / np.sqrt(np.c_[3*[(self.rand_normals**2).sum(1)]]).T

  def vp_dirs(self, vec, x,y):
    '''Unit image direction towards the vanishing point of spatial
    direction vec, at the intrinsic image position (x,y).'''
    rx,ry,rz = vec
    vy,vx=ry*self.fd-rz*y, rx*self.fd-rz*x
    fn = (vx**2+vy**2)**-0.5
    return (vx*fn,vy*fn)

  def calculate_MAP_np(self, rotM):
    '''Pure-numpy MAP error estimator (reference implementation of
    calculate_MAP).  Also assigns self.labels with the direction whose
    predicted edge best matches each edgel.'''
    projs = np.zeros((self.Ned, 4))
    projs[:,3] = self.p3 ## Set the maximum error to take edgel in consideration

    dx, dy = self.extrinsic_edgels[:,2], self.extrinsic_edgels[:,3]
    x, y = self.extrinsic_edgels[:,0], self.extrinsic_edgels[:,1]
    ## Here we finally calculate the 'predicted' edge directions for each
    ## extracted edgel position and possible direction in the world, then find
    ## the value of the projections of the gradient direction that we seek to
    ## minimize.
    for d in range(3):
      rx,ry,rz = rotM[d]
      vy,vx=ry-rz*y, rx-rz*x
      fn = (vx**2+vy**2)**-0.5
      vx,vy = vx*fn,vy*fn

      projs[:,d] = np.abs(dx*vx+dy*vy)

    ## The sum of all the smallest (assigned direction) projections for
    ## each edgel.  (Fixed: the original summed the undefined name
    ## 'proj' instead of 'projs'.)
    ang_err = np.sum(np.min(projs, 1))
    ## First index attaining the row minimum -- equivalent to the
    ## original unqualified array/nonzero expression, which raised
    ## NameError because only 'np' is imported at module level.
    self.labels = np.argmin(projs, 1)
    return ang_err

  def calculate_MAP(self, rotM):
    '''MAP error estimator, delegated to the compiled camori_aux
    routine over the extrinsic edgels.'''
    myvps = np.array(rotM, dtype=np.float32)
    rx = np.ascontiguousarray(myvps[:,0])
    ry = np.ascontiguousarray(myvps[:,1])
    rz = np.ascontiguousarray(myvps[:,2])

    ang_err = camori_aux.nic_MAP_estimator_edgels( rx,ry,rz,
                             self.extrinsic_edgels,
                             self.labels,
                             self.p3 )

    return ang_err

  def plot_vdirs(self, ax, spacing, myR):
    '''Plot the vanishing point directions at various pixels.  ax is a
    matplotlib axes, taken with "gca()"; spacing is the separation
    between the sample points and myR the rotation matrix.'''
    qq = spacing*0.45*np.array([-1,+1])
    LL0=[]
    LL1=[]
    LL2=[]
    for j in np.mgrid[:self.Iheight:spacing]:
      for k in np.mgrid[:self.Iwidth:spacing]:
        vx,vy = self.vp_dirs(myR[0], k-self.Iwidth/2, j-self.Iheight/2)
        LL0.append( np.r_[k+vx*qq, j+vy*qq] )
        vx,vy = self.vp_dirs(myR[1], k-self.Iwidth/2, j-self.Iheight/2)
        LL1.append( np.r_[k+vx*qq, j+vy*qq] )
        vx,vy = self.vp_dirs(myR[2], k-self.Iwidth/2, j-self.Iheight/2)
        LL2.append( np.r_[k+vx*qq, j+vy*qq] )
    LL0=np.array(LL0)
    LL1=np.array(LL1)
    LL2=np.array(LL2)

    ax.plot( LL0[:,:2].T, LL0[:,2:].T, dir_colors[0], lw=2)
    ax.plot( LL1[:,:2].T, LL1[:,2:].T, dir_colors[1], lw=2)
    ax.plot( LL2[:,:2].T, LL2[:,2:].T, dir_colors[2], lw=2)

  def calculate_rectified_observations(self, gain):
    '''Compute rectified (spherical) coordinates and color descriptors
    for every labeled edgel, then build one kd-tree per direction.  The
    gain scales the first spherical coordinate, controlling the
    precision of the later matching.'''
    rotM = self.orientation.rot().T
    self.i_edgels = [[],[],[]]
    self.o_edgels = [[],[],[]]
    self.s_edgels = [[],[],[]]
    self.descriptors = [[],[],[]]

    ## Use the refined labels whenever they have been computed (an
    ## explicit length test works for both the initial [] and the
    ## allocated ndarray, unlike a comparison against []).
    labs = self.new_labels if len(self.new_labels) > 0 else self.labels

    ## Loop through each edgel in each direction (label).
    for k,j,edx,edy,lab in np.c_[self.edgels, labs]:
      lab=int(lab)
      ## If label is not one of the three "valid" directions,
      ## discard this edgel.
      if lab>=3:
        continue

      ## k and j are the image coordinates. We first calculate the
      ## intrinsic coordinates. This should be more properly done with
      ## a full transformation matrix, like the one OpenCV creates in
      ## the camera calibration procedure.
      x_c = (float(k) - self.middlex) / self.fd
      y_c = (float(j) - self.middley) / self.fd

      ## Store original image, and "intrinsic" coordinates of the edgel.
      self.i_edgels[lab].append([k, j, edx, edy])
      self.o_edgels[lab].append([x_c, y_c])

      ## Observation in rectangular coords.
      obs_c = np.array([ x_c, y_c, 1.0 ]) / np.sqrt(x_c**2 + y_c**2 + 1)

      ## Multiply observation direction vector with camera orientation
      ## matrix. This returns observation direction in world reference
      ## frame. From "camera-centric" to "world-centric" coordinates.
      ## Attention: changing side of application inverts matrix...
      obs_w = np.dot(obs_c, rotM.T)

      ## Calculate the spherical coordinates in different
      ## frames, according to edge direction.
      if lab == 0:
        ed_s = np.array([gain*np.arctan2(obs_w[2], obs_w[1]), np.arcsin(obs_w[0])])
      elif lab == 1:
        ed_s = np.array([gain*np.arctan2(obs_w[0], obs_w[2]), np.arcsin(obs_w[1])])
      elif lab == 2:
        ed_s = np.array([gain*np.arctan2(obs_w[1], obs_w[0]), np.arcsin(obs_w[2])])
      self.s_edgels[lab].append(ed_s)


      #########################################################
      ## Fetch color descriptors

      ## Get the derivative direction (dx,dy) from estimated
      ## camera direction instead of original gradient measurement...
      vpx,vpy = self.vp_dirs(rotM[lab], x_c, y_c)
      dx=vpy
      dy=-vpx

      ## Sign convention for which side of the edge is "first".
      if lab == 0:
        sentido = 2*(dy>0)-1
      elif lab == 1:
        sentido = 2*(dx>0)-1
      elif lab == 2:
        sentido = 2*( (np.abs(ed_s[0])<(pi/2)) * (dy>0) )-1

      ## Pick interpolated gradient values. First check what is the
      ## "integer" coordinate.
      ## NOTE(review): j and k may be fractional; indexing here relies
      ## on old numpy accepting float indices -- confirm on upgrade.
      if np.mod(j,1) == 0:
        c = np.mod(k,1)
        cm = self.frame[j,k] + c * (self.frame[j,k+1] - self.frame[j,k])
        gx = self.gradx[j,k] + c * (self.gradx[j,k+1] - self.gradx[j,k])
        gy = self.grady[j,k] + c * (self.grady[j,k+1] - self.grady[j,k])
      else:
        c = np.mod(j,1)
        cm = self.frame[j,k] + c * (self.frame[j+1,k] - self.frame[j,k])
        gx = self.gradx[j,k] + c * (self.gradx[j+1,k] - self.gradx[j,k])
        gy = self.grady[j,k] + c * (self.grady[j+1,k] - self.grady[j,k])

      ## Colors sampled on each side of the edge.
      crgb1 = cm - sentido * (dx * gx + dy * gy)
      crgb2 = cm + sentido * (dx * gx + dy * gy)

      cxyy1 = rgb_to_xyy(crgb1)
      cxyy2 = rgb_to_xyy(crgb2)

      #self.descriptors[lab].append( np.r_[cxyy1,cxyy2] ) ## Use xyY colors as descriptors
      self.descriptors[lab].append( np.r_[crgb1,crgb2] ) ## Use original RBG
      ########################################################

    for lab in range(3):
      self.i_edgels[lab] = np.array(self.i_edgels[lab])
      self.o_edgels[lab]=np.array(self.o_edgels[lab])
      self.s_edgels[lab]=np.array(self.s_edgels[lab])
      self.descriptors[lab]=np.array(self.descriptors[lab])

    ## Create the KD-trees to query for points. One tree for each
    ## direction.  Use an explicit length test: comparing an ndarray
    ## against [] never evaluates False, so the original guard let
    ## empty directions reach cKDTree.
    self.trees=[]
    for lab in range(3):
      if len(self.s_edgels[lab]) > 0:
        self.trees.append( scipy.spatial.cKDTree( self.s_edgels[lab] ) )
      else:
        self.trees.append([])







class PictureEqrec(Picture):
  '''For equirrectangular projection images.'''

  def __init__(self, frame):
    Picture.__init__(self, frame)

  def extract_edgels(self, gstep, glim, method=0):
    '''Extract the edgels, then precompute for each one the sines and
    cosines of its longitude/latitude plus its direction, as consumed
    by the compiled MAP estimator.'''
    Picture.extract_edgels(self, gstep, glim, method)
    self.extrinsic_edgels = np.ascontiguousarray(np.c_[
        sin((self.edgels[:,0]-self.Iwidth*3/4)*pi/self.Iheight),
        cos((self.edgels[:,0]-self.Iwidth*3/4)*pi/self.Iheight),
        sin((self.edgels[:,1]-self.Iheight/2)*pi/self.Iheight),
        cos((self.edgels[:,1]-self.Iheight/2)*pi/self.Iheight),
        self.edgels[:,2:4] ] )
  grid_sweep=extract_edgels ## Old function name

  def vp_dirs_pixel(self, vec, x,y):
    '''Vanishing-point direction at pixel coordinates (x,y).  Converts
    the pixel position to spherical coordinates and delegates to
    vp_dirs (removing the formula the two methods used to duplicate).'''
    th = (x-self.Iwidth/2)*pi/self.Iheight
    ph = (y-self.Iheight/2)*pi/self.Iheight
    return self.vp_dirs(vec, th, ph)

  def vp_dirs(self, vec, th,ph):
    '''Unit image direction towards the vanishing point of spatial
    direction vec, at spherical coordinates (th,ph).'''
    rx,ry,rz = vec

    if ry != 1:
      x,y,z = cos(th)*cos(ph), sin(ph), -sin(th)*cos(ph)
      px,py,pz = y*rz-z*ry, z*rx-x*rz, x*ry-y*rx
      dphdth = py*(pz*cos(th)+px*sin(th)) / (py**2+(px*cos(th) - pz*sin(th))**2)
      fn = (dphdth**2+1)**-0.5
      vx,vy = (fn, dphdth*fn)
    else:
      ## Degenerate case: direction aligned with the camera y axis.
      vx,vy = (0,1)

    return (vx,vy)

  def calculate_MAP_np(self, rotM):
    '''Pure-numpy MAP error estimator for equirectangular images.

    NOTE(review): assumes self.proj is a preallocated (Ned,4) array set
    up elsewhere -- confirm before use.'''
    self.proj[:,3] = self.p3 ## Maximum error to take an edgel in consideration

    dx, dy = self.edgels[:,2], self.edgels[:,3]
    th, ph = self.edgels[:,0], self.edgels[:,1]

    ## Hoist the per-edgel trigonometric terms out of the loop over
    ## the three directions.
    sin_th,cos_th=sin(th),cos(th)
    sin_ph,cos_ph=sin(ph),cos(ph)

    for d in range(3):
      rx,ry,rz = rotM[d]
      if ry != 1:
        x,y,z = cos_th*cos_ph, sin_ph, -sin_th*cos_ph
        px,py,pz = y*rz-z*ry, z*rx-x*rz, x*ry-y*rx
        dphdth = py*(pz*cos_th + px*sin_th) / (py**2+(px*cos_th - pz*sin_th)**2)
        fn = (dphdth**2+1)**-0.5
        vx,vy = (fn, dphdth*fn)
      else:
        vx,vy = (0,1)

      self.proj[:,d] = np.abs(dx*vx+dy*vy)

    ## Sum of the smallest (assigned direction) projection per edgel.
    ang_err = np.sum(np.min(self.proj, 1))
    return ang_err

  def calculate_MAP(self, rotM):
    '''MAP error estimator, delegated to the compiled camori_aux
    routine for equirectangular images.'''
    ## np.array: the scipy alias was deprecated and removed from scipy.
    myvps = np.array(rotM, dtype=np.float32)
    rx = np.ascontiguousarray(myvps[:,0])
    ry = np.ascontiguousarray(myvps[:,1])
    rz = np.ascontiguousarray(myvps[:,2])

    ang_err = camori_aux.nic_MAP_estimator_edgels_eqrec( rx,ry,rz,
                             self.extrinsic_edgels,
                             self.labels,
                             self.p3 )

    return ang_err

  def calculate_rectified_observations(self, gain):
    '''Compute rectified (spherical) coordinates and color descriptors
    for every labeled edgel, then build one kd-tree per direction.  The
    gain scales the first spherical coordinate, controlling the
    precision of the later matching.'''
    rotM = quaternion_to_matrix(self.orientation)
    self.i_edgels = [[],[],[]]
    self.o_edgels = [[],[],[]]
    self.s_edgels = [[],[],[]]
    self.descriptors = [[],[],[]]


    for k,j,edx,edy,lab in np.c_[self.edgels, self.labels]:
      lab=int(lab)
      ## If label is not one of the three "valid" directions,
      ## discard this edgel.
      if lab>=3:
        continue

      ## Normalize pixel resolution.
      th_c = (k - self.Iwidth * 3 / 4) * pi / self.Iheight
      ph_c = (j - self.Iheight / 2) * pi / self.Iheight

      ## Store original image, and "intrinsic" coordinates of the edgel.
      self.i_edgels[lab].append([k, j, edx, edy])
      self.o_edgels[lab].append([th_c, ph_c])

      ## Convert from spherical to rectangular coords
      obs_c = np.array([ cos(th_c) * cos(ph_c),
                sin(ph_c),
                -sin(th_c) * cos(ph_c) ])

      ## Multiply observation direction vector with inverse of
      ## estimated camera orientation matrix. This returns
      ## observation direction in world reference frame. From
      ## "camera-centric" to "world-centric" coordinates.
      obs_w = np.dot(obs_c, rotM.T)

      ## Calculate the spherical coordinates in different
      ## frames, according to edge direction.
      if lab == 0:
        ed_s = np.array([gain*np.arctan2(obs_w[2], obs_w[1]), np.arcsin(obs_w[0])])
      elif lab == 1:
        ed_s = np.array([gain*np.arctan2(obs_w[0], obs_w[2]), np.arcsin(obs_w[1])])
      elif lab == 2:
        ed_s = np.array([gain*np.arctan2(obs_w[1], obs_w[0]), np.arcsin(obs_w[2])])
      self.s_edgels[lab].append(ed_s)


      #########################################################
      ## Fetch color descriptors

      ## Get the derivative direction (dx,dy) from estimated
      ## camera direction instead of original gradient measurement...
      vpx,vpy = self.vp_dirs(rotM[lab], th_c, ph_c)
      dx = vpy
      dy = -vpx

      ## Sign convention for which side of the edge is "first".
      if lab == 0:
        sentido = 2*(dy>0)-1
      elif lab == 1:
        sentido = 2*(dx>0)-1
      elif lab == 2:
        sentido = 2*( (np.abs(ed_s[0])<(pi/2)) * (dy>0) )-1

      ## Pick interpolated gradient values. First check what is the
      ## "integer" coordinate.
      ## NOTE(review): j and k may be fractional; indexing here relies
      ## on old numpy accepting float indices -- confirm on upgrade.
      if np.mod(j,1) == 0:
        c = np.mod(k,1)
        cm = self.frame[j,k] + c * (self.frame[j,k+1] - self.frame[j,k])
        gx = self.gradx[j,k] + c * (self.gradx[j,k+1] - self.gradx[j,k])
        gy = self.grady[j,k] + c * (self.grady[j,k+1] - self.grady[j,k])
      else:
        c = np.mod(j,1)
        cm = self.frame[j,k] + c * (self.frame[j+1,k] - self.frame[j,k])
        gx = self.gradx[j,k] + c * (self.gradx[j+1,k] - self.gradx[j,k])
        gy = self.grady[j,k] + c * (self.grady[j+1,k] - self.grady[j,k])

      ## Colors sampled on each side of the edge.
      crgb1 = cm - sentido * (dx * gx + dy * gy)
      crgb2 = cm + sentido * (dx * gx + dy * gy)

      cxyy1 = rgb_to_xyy(crgb1)
      cxyy2 = rgb_to_xyy(crgb2)

      #self.descriptors[lab].append( np.r_[cxyy1,cxyy2] ) ## Use xyY colors as descriptors
      self.descriptors[lab].append( np.r_[crgb1,crgb2] ) ## Use original RBG
      ########################################################

    for lab in range(3):
      self.i_edgels[lab] = np.array(self.i_edgels[lab])
      self.o_edgels[lab] = np.array(self.o_edgels[lab])
      self.s_edgels[lab] = np.array(self.s_edgels[lab])
      self.descriptors[lab] = np.array(self.descriptors[lab])

    ## Create the KD-trees to query for points. One tree for each
    ## direction.  Guard against empty directions, consistently with
    ## PicturePinhole: cKDTree cannot be built from an empty array.
    self.trees=[]
    for lab in range(3):
      if len(self.s_edgels[lab]) > 0:
        self.trees.append(scipy.spatial.cKDTree(self.s_edgels[lab]))
      else:
        self.trees.append([])

  def plot_vdirs(self, ax, spacing, myR):
    '''Plot the vanishing point directions at various pixels.  ax is a
    matplotlib axes, taken with "gca()"; spacing is the separation
    between the sample points and myR the rotation matrix.'''
    qq = spacing*0.45*np.array([-1,+1])
    LL0=[]
    LL1=[]
    LL2=[]
    for j in np.mgrid[:self.Iheight:spacing]:
      for k in np.mgrid[:self.Iwidth:spacing]:
        th_c = (k - self.Iwidth * 3 / 4) * pi / self.Iheight
        ph_c = (j - self.Iheight / 2) * pi / self.Iheight

        vx,vy = self.vp_dirs(myR[0], th_c, ph_c)
        LL0.append( np.r_[k+vx*qq, j+vy*qq] )
        vx,vy = self.vp_dirs(myR[1], th_c, ph_c)
        LL1.append( np.r_[k+vx*qq, j+vy*qq] )
        vx,vy = self.vp_dirs(myR[2], th_c, ph_c)
        LL2.append( np.r_[k+vx*qq, j+vy*qq] )
    LL0=np.array(LL0)
    LL1=np.array(LL1)
    LL2=np.array(LL2)

    ax.plot( LL0[:,:2].T, LL0[:,2:].T, dir_colors[0], lw=2)
    ax.plot( LL1[:,:2].T, LL1[:,2:].T, dir_colors[1], lw=2)
    ax.plot( LL2[:,:2].T, LL2[:,2:].T, dir_colors[2], lw=2)













class EdgelSet:
  """A set of edgels extracted from an image, in separate orthogonal directions.
  """

  def __init__(self, filename, oldgain, newgain):
    """Initializes object frmo a npz file generated by the edgel extractor program.
    """
    qqf = open(filename)
    qq = np.load(qqf)

    self.i_edgels = [ qq['i_edgels0'], qq['i_edgels1'], qq['i_edgels2'] ]

    self.orientation = qq['xopt']

    self.oldgain = oldgain
    self.newgain = newgain

    self.s_edgels = [ qq['s_edgels0'], qq['s_edgels1'], qq['s_edgels2'] ]
    for lab in range(3):
      if self.s_edgels[lab].shape[0] > 0:
        self.s_edgels[lab][:,0] *= 1./self.oldgain
        self.s_edgels[lab][:,1] *= self.newgain


    self.o_edgels = [ qq['o_edgels0'], qq['o_edgels1'], qq['o_edgels2'] ]
    self.descriptors = [ qq['descriptors0'], qq['descriptors1'], qq['descriptors2'] ]

    self.arnr = qq['arnr']
    self.arnl = qq['arnl']

    ## Create kd-trees for each direction
    self.trees = [[],[],[]]

    for lab in range(3):
      if self.s_edgels[lab] != []:
        self.trees[lab] = scipy.spatial.cKDTree(self.s_edgels[lab])

    qqf.close()

    ## The matrix used for the descriptor distance calculation
    self.Sinv = scipy.identity(6)

  ####################################################################
  ## A posteriori line extraction.
  def calc_obs(self, lab=1):
    self.obs = np.c_[ sin(self.s_edgels[lab][:,0]), cos(self.s_edgels[lab][:,0]) ]

  def cluster_points(self, lab=1):
    '''Reads the neighborhood list and come up with a dictionary with
    the label and size of each extracted line. LInes are "clusters" of
    points.'''

    self.dnr = dict([(x[1],x[2]) for x in self.arnr if x[0]==lab])
    self.dnl = dict([(x[1],x[2]) for x in self.arnl if x[0]==lab])
    self.dr = dict([(x[1],(x[2],1)) for x in self.arnr if x[0]==lab])
    self.dl = dict([(x[1],(x[2],1)) for x in self.arnl if x[0]==lab])

    ## This flag is active if any update happened in the
    ## dictionary
    flag=True
    while flag:
      ## Set flag to false before sweeping the dictionary in
      ## this iteration
      flag=False
      ## Sweeps the dictionary, and update (or not) each value
      for k0 in self.dl:
        k1, s1 = self.dl[k0]
        ## If k1 is not a key, that means we are already
        ## pointing to the last node of the line, so we stop
        ## updating.
        if k1 in self.dl:
          ## If not pointing to line end, point to the
          ## "grandson" node, and add up the distance sum.
          flag=True
          k2, s2 = self.dl[k1]
          self.dl[k0] = ( k2, s1+s2 )

    ## This flag is active if any update happened in the
    ## dictionary
    flag=True
    while flag:
      ## Set flag to false before sweeping the dictionary in
      ## this iteration
      flag=False
      ## Sweeps the dictionary, and update (or not) each value
      for k0 in self.dr:
        k1, s1 = self.dr[k0]
        ## If k1 is not a key, that means we are already
        ## pointing to the last node of the line, so we stop
        ## updating.
        if k1 in self.dr:
          ## If not pointing to line end, point to the
          ## "gandson" node, and add up the distance sum.
          flag=True
          k2, s2 = self.dr[k1]
          self.dr[k0] = ( k2, s1+s2 )




  def extract_lines(self, frac=10):
    """Extract the longest point chains ("lines") in each direction.

    frac -- keep roughly the longest 1/frac of the lines per direction
        (all of them, sorted by length, when fewer than 4 would remain).

    Fills self.all_lines / self.s_all_lines with the image and
    spherical coordinates of each line's points, and self.all_heads
    with the starting point indices.
    """
    self.all_lines = [[],[],[]]
    self.s_all_lines = [[],[],[]]
    self.all_heads = [[],[],[]]
    for lab in range(3):

      self.calc_obs(lab)
      self.cluster_points(lab)

      heads = self.all_heads[lab]
      dist=[]

      ## Walk right to each chain's end, then left to its head,
      ## recording each head only once, with the chain length.
      for ii in self.dr:
        end, distance = self.dr[ii]
        head, distance = self.dl[end]
        if head in heads:
          continue

        heads.append(head)
        dist.append(distance)

      heads = np.array(heads)
      dist = np.array(dist)

      Ntot = heads.shape[0]

      ## Keep only the longest lines.  Floor division keeps the cut an
      ## integer under both Python 2 and Python 3 semantics.
      cut = Ntot // frac
      if cut >= 4:
        order = dist.argsort()[-1:-cut:-1]
      else:
        order = dist.argsort()
      heads = heads[order]
      dist = dist[order]

      ## Follow the right-neighbor map from each head to collect the
      ## point indices of the line.
      for h in heads:
        x = h
        myl = []
        while x in self.dnr:
          myl.append(x)
          x = self.dnr[x]
        myl.append(x)

        pts = np.array(self.i_edgels[lab][myl,:2], dtype='float32')
        s_pts = np.array(self.s_edgels[lab][myl,:2], dtype='float32')
        self.all_lines[lab].append(pts)
        self.s_all_lines[lab].append(s_pts)
      self.all_heads[lab]=heads

  def calculate_rectified_observations(self, gain):
    """Recompute the rectified (spherical) coordinates of the stored
    edgels from the current orientation estimate, then rebuild the
    per-direction kd-trees.  gain scales the first spherical
    coordinate, controlling the precision of the later matching.

    NOTE(review): reads self.middlex, self.middley, self.fd and
    self.k_dist, which are not assigned in the visible __init__ --
    confirm callers set them before invoking this method.
    """
    ## Get the rotation matrix for estimate orientation. The gain controls the precision...
    rotM = quaternion_to_matrix(self.orientation)
    self.o_edgels = [[],[],[]]
    self.s_edgels = [[],[],[]]

    ## Loop through each edgel in each direction (label).
    for lab in range(3):
      for k,j,edx,edy in self.i_edgels[lab]:
        lab=int(lab)
        ## If label is not one of the three "valid" directions,
        ## discard this edgel.  (NOTE(review): lab comes from
        ## range(3), so this guard can never trigger here.)
        if lab>=3:
          continue

        ## k and j are the image coordinates. We first calculate the
        ## intrinsic coordinates. This should be more properly done with
        ## a full transformaton matrix, like the one OpenCV creates in
        ## the camera calibration procedure.
        x_c = (k - self.middlex)
        y_c = (j - self.middley)
        ## Radial distortion correction with coefficient k_dist
        ## (scaled by 1e-10), then focal-distance normalization.
        r2_c = x_c**2 + y_c**2
        x_c = x_c * (1+1e-10*self.k_dist*r2_c) / self.fd
        y_c = y_c * (1+1e-10*self.k_dist*r2_c) / self.fd

        ## Store original image, and "intrinsic" coordinates of the edgel.
        self.o_edgels[lab].append([x_c, y_c])

        ## Observation in rectangular coords.
        obs_c = np.array([ x_c, y_c, 1.0 ]) / np.sqrt(x_c**2 + y_c**2 + 1)

        ## Multiply observation direction vector with camera orientation
        ## matrix. This returns observation direction in world reference
        ## frame. From "camera-centric" to "world-centric" coordinates.
        ## Attention: changing side of application inverts matrix...
        obs_w = np.dot(obs_c, rotM.T)

        ## Calculate the spherical coordinates in different
        ## frames, according to edge direction.
        if lab == 0:
          ed_s = np.array([gain*np.arctan2(obs_w[2], obs_w[1]), np.arcsin(obs_w[0])])
        elif lab == 1:
          ed_s = np.array([gain*np.arctan2(obs_w[0], obs_w[2]), np.arcsin(obs_w[1])])
        elif lab == 2:
          ed_s = np.array([gain*np.arctan2(obs_w[1], obs_w[0]), np.arcsin(obs_w[2])])
        self.s_edgels[lab].append(ed_s)

    ## Convert the per-direction lists into arrays.
    for lab in range(3):
      self.o_edgels[lab] = np.array(self.o_edgels[lab])
      self.s_edgels[lab] = np.array(self.s_edgels[lab])
      self.descriptors[lab] = np.array(self.descriptors[lab])

    ## Create the KD-trees to query for points. One tree for each direction.
    ## NOTE(review): s_edgels[lab] is an ndarray after the conversion
    ## above, so the != [] test is always true and an empty direction
    ## would still reach cKDTree -- confirm whether that can happen.
    self.trees=[]
    for lab in range(3):
      if self.s_edgels[lab] != []:
        self.trees.append( scipy.spatial.cKDTree( self.s_edgels[lab] ) )
      else:
        self.trees.append([])



  def calculate_rectified_observations_pinhole(self, gain):
    """Project each labeled edgel to rectified spherical coordinates.

    Assumes an equirectangular (panoramic) image whose pixels map linearly
    to angles; despite the name there is no pinhole intrinsic model here
    (no focal distance / distortion, unlike the sibling method that uses
    self.fd and self.k_dist).  For every edgel with a valid direction
    label (0, 1 or 2) it:

      1. converts pixel coordinates to camera-frame angles (th_c, ph_c);
      2. converts those to a rectangular direction vector;
      3. rotates the vector into the world frame using the current
         orientation estimate (self.orientation);
      4. stores longitude/latitude in the frame associated with the
         edgel's label, with longitude scaled by `gain`.

    Populates self.i_edgels, self.o_edgels, self.s_edgels,
    self.descriptors (one numpy array per label) and self.trees (one
    cKDTree per label, or [] for labels with no edgels).

    Parameters:
      gain: scale applied to the longitude coordinate; controls the
            precision of later nearest-neighbor matching.
    """
    ## Rotation matrix for the current orientation estimate.
    rotM = quaternion_to_matrix(self.orientation)
    self.i_edgels = [[],[],[]]
    self.o_edgels = [[],[],[]]
    self.s_edgels = [[],[],[]]
    self.descriptors = [[],[],[]]

    for k,j,edx,edy,lab in np.c_[self.edgels, self.labels]:
      lab = int(lab)
      ## Discard edgels whose label is not one of the three "valid"
      ## vanishing-point directions.
      if lab >= 3:
        continue

      ## Normalize pixel resolution: pixel coordinates to angles.
      th_c = (k - self.Iwidth * 3 / 4) * pi / self.Iheight
      ph_c = (j - self.Iheight / 2) * pi / self.Iheight

      ## Store original image, and "intrinsic" coordinates of the edgel.
      self.i_edgels[lab].append([k, j, edx, edy])
      self.o_edgels[lab].append([th_c, ph_c])

      ## Convert from spherical to rectangular coords (camera frame).
      obs_c = np.array([ cos(th_c) * cos(ph_c),
                sin(ph_c),
                -sin(th_c) * cos(ph_c) ])

      ## Multiply observation direction vector with inverse of
      ## estimated camera orientation matrix. This returns
      ## observation direction in world reference frame. From
      ## "camera-centric" to "world-centric" coordinates.
      ## (Right-multiplying by rotM.T applies the inverse rotation.)
      obs_w = np.dot(obs_c, rotM.T)

      ## Spherical coordinates in the frame associated with the edgel's
      ## direction label; longitude is scaled by `gain`.
      if lab == 0:
        ed_s = np.array([gain*np.arctan2(obs_w[2], obs_w[1]), np.arcsin(obs_w[0])])
      elif lab == 1:
        ed_s = np.array([gain*np.arctan2(obs_w[0], obs_w[2]), np.arcsin(obs_w[1])])
      elif lab == 2:
        ed_s = np.array([gain*np.arctan2(obs_w[1], obs_w[0]), np.arcsin(obs_w[2])])
      self.s_edgels[lab].append(ed_s)

    for lab in range(3):
      self.i_edgels[lab] = np.array(self.i_edgels[lab])
      self.o_edgels[lab] = np.array(self.o_edgels[lab])
      self.s_edgels[lab] = np.array(self.s_edgels[lab])
      self.descriptors[lab] = np.array(self.descriptors[lab])

    ## Create the KD-trees to query for points. One tree for each direction.
    ## Guard against labels with no edgels: cKDTree cannot be built from an
    ## empty 1-D array.  This mirrors the guard in the sibling (non-pinhole)
    ## method, but uses len() instead of the ambiguous array-vs-list
    ## comparison `!= []`.
    self.trees = []
    for lab in range(3):
      if len(self.s_edgels[lab]) > 0:
        self.trees.append(scipy.spatial.cKDTree(self.s_edgels[lab]))
      else:
        self.trees.append([])




## Local variables:
## python-indent: 2
## end:
