#!/usr/bin/python
#coding:utf-8

# Copyright 2012 Nicolau Leal Werneck, Anna Helena Reali Costa and
# Universidade de São Paulo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from camori import *

from quaternion import Quat

from simplex_sa import SimplexSO3

## Colors used for plotting different labels (directions): one color per
## vanishing-point direction (0, 1, 2). The fourth entry is presumably for
## edgels outside the three valid directions -- not used in this file.
dir_colors=['#ea6949', '#51c373', '#a370ff', '#444444']


def aligned_quaternion(v):
    '''Return a quaternion whose rotation aligns one coordinate axis with the
    direction of the 3-vector v, choosing the axis closest to v.

    NOTE(review): relies on the project Quat class semantics (construction
    from a 3-vector, sqrt() as a half rotation, normalize(), inverse()) --
    confirm against the quaternion module.
    '''
    ## The largest component of v selects which coordinate axis to align.
    ll = np.argmax(np.abs(v))
    ## Unit vector along that axis, with the same sign as v's component.
    ee = np.zeros(3)
    ee[ll] = np.sign(v[ll])
    ## Quaternion from the cross product; its "square root" rotates by half
    ## the angle between v and the chosen axis.
    q=Quat(np.cross(v, ee))
    R = q.sqrt()

    ## Fixed extra rotations to put the axes in a canonical arrangement,
    ## depending on which axis was chosen and on the sign of v's component.
    if np.sign(v[ll]) > 0:
        if ll == 1:
            R = R*Quat(1,-1,-1,-1).normalize()
        if ll == 2:
            R = R*Quat(1,1,1,1).normalize()

    if np.sign(v[ll]) < 0:
        if ll == 0:
            R = R*Quat(0,0,1,0).normalize()
        if ll == 1:
            R = R*Quat(1,1,-1,1).normalize()
        if ll == 2:
            R = R*Quat(1,-1,-1,1).normalize()
    ## Return the inverse: the rotation taking the axis onto v.
    return R.inverse()

class PictureHarris(Picture):
  '''Picture specialized for perspective transform, "pinhole" cameras.

  Adds the camera intrinsic parameters (principal point, focal distance and
  one radial distortion coefficient of the Harris model) on top of the base
  Picture class, plus methods to estimate the camera orientation from the
  extracted edgels and to plot the resulting vanishing-point directions.
  '''

  def __init__(self, frame, middlex=None, middley=None, fd=0.0, distortion = 0.0):
    '''Store the frame and the camera intrinsic parameters.

    middlex, middley -- principal point (pixels); default to image center.
    fd -- focal distance, in pixels.
    distortion -- Harris-model radial distortion coefficient.
    '''
    Picture.__init__(self, frame)

    ## Focal distance (default)
    self.fd = fd

    ## Principal point. Compare against None explicitly so that an explicit
    ## coordinate of 0 is not silently replaced by the image center.
    self.middlex = middlex if middlex is not None else (self.Iwidth-1)/2.0
    self.middley = middley if middley is not None else (self.Iheight-1)/2.0

    self.distortion = distortion

  def extract_edgels(self, gstep, glim, method=0):
    '''Extract edgels by delegating to the base class implementation.'''
    Picture.extract_edgels(self, gstep, glim, method)

  def fit_error(self, midx, midy, fd, k, q, pf1,pf2,pf3):
    '''Total angular fitting error of this picture's edgels for orientation
    q, intrinsic parameters (midx, midy, fd, k) and M-estimator parameters
    pf1, pf2, pf3. Computed by the camori_aux extension.'''
    err = camori_aux.harris_angle_error(midx, midy, fd, k, q,
                                        self.edgels, pf1,pf2,pf3)
    return err

  def residues(self, midx, midy, fd, k, q, pf1,pf2,pf3):
    '''Per-edgel angular residues for the same arguments as fit_error.'''
    err = camori_aux.harris_angle_residues(midx, midy, fd, k, q,
                                        self.edgels, pf1,pf2,pf3)
    return err

  def random_search(self, initial_trials, fp=(1,3,0.15)):
    '''Calculate an initial orientation estimate by picking random edgels
    and calculating an orientation from them in a direct, non-iterative
    way. This is a kind of random search in a space that depends on the
    observations, so it should be better than sweeping the whole space in
    a regular way. This "adaptive" sampling of the parameter space is
    pretty much the same that is used in RANSAC or in J-linkage, but it is
    the same final target function that we are calculating at each point,
    not something else.

    initial_trials -- number of random triplets to try (must be >= 1).
    fp -- M-estimator parameters; default is Tukey with 0.15 error variance.

    Returns (best orientation Quat, and the three edgel indices used).
    Raises ValueError on insufficient edgels or trials.
    '''
    if initial_trials < 1:
      raise ValueError("random_search needs at least one trial.")
    if self.Ned < 3:
      ## With fewer than 3 edgels the re-sampling loops below never finish.
      raise ValueError("random_search needs at least 3 edgels.")

    bestv = np.inf ## Smallest value found so far.
    for trial in range(initial_trials):
      ## Pick indices of the reference normals. Re-sample until we get a
      ## list of three different values. (randint's high bound is
      ## exclusive, so this draws from [0, Ned-1].)
      pk_a = np.random.randint(0, self.Ned)
      pk_b = np.random.randint(0, self.Ned)
      while pk_b == pk_a:
          pk_b = np.random.randint(0, self.Ned)
      pk_c = np.random.randint(0, self.Ned)
      while pk_c == pk_a or pk_c == pk_b:
          pk_c = np.random.randint(0, self.Ned)

      ## Get the normals with the first two chosen indices, and calculate
      ## a rotation matrix that has the x axis aligned to them.
      n_a = self.normals[pk_a]
      n_b = self.normals[pk_b]
      vp1 = np.cross(n_a, n_b)
      vp1 = vp1 * (vp1**2).sum()**-0.5
      q1 = aligned_quaternion(vp1)

      ## Use the third normal to find the rotation about x that aligns the
      ## y direction to this edgel.
      n_c = self.normals[pk_c]
      vaux = np.dot(n_c, q1.rot())
      ang = np.arctan2(vaux[1], -vaux[2])
      q2 = Quat(np.sin(ang/2),0,0) * q1 ## The resulting orientation

      ## Evaluate the target function at this sampled orientation.
      newv = harris_angle_error(
        self.middlex, self.middley,self.fd,self.distortion,
        q2.q, self.edgels, *fp)
      ## If the value is the best yet, store the solution.
      if newv <= bestv :
        bestv = newv
        bpk_a = pk_a
        bpk_b = pk_b
        bpk_c = pk_c
        qopt = q2
    return qopt, bpk_a, bpk_b, bpk_c

  def find_orientation_cal_first(self, ip_in, xini):
    '''Optimize the orientation over SO(3) from each initial estimate in
    xini, with fixed intrinsic parameters, and return the best result.

    ip_in -- either (fd, distortion) or (midx, midy, fd, distortion).
    xini -- iterable of initial orientation estimates (Quat-constructible).

    Returns (best orientation Quat in canonical form, its loss value).
    '''
    def loss_func(qq, pic, ip):
      ## Fixed M-estimator parameters for this stage.
      loss_par = (1.0, 4, 0.15)
      return pic.fit_error(ip[0], ip[1], ip[2], ip[3],
                           qq, *loss_par)

    ## Set vector of internal parameters to be used.
    if len(ip_in) == 2:
      ip = np.array([self.middlex, self.middley, ip_in[0], ip_in[1]])
    elif len(ip_in) == 4:
      ip = np.array(ip_in)
    else:
      raise ValueError("Invalid number of internal parameters.")

    ## Run one simplex optimization per initial estimate, keep the best.
    errs=[]
    for xi in xini:
        ss = SimplexSO3(loss_func, Quat(xi), params=(self,ip))
        ss.opt_loop(1000)
        xopt = Quat(ss.best).canonical()
        val = loss_func(xopt.q, self, ip)
        errs.append((xopt,val))
    best_idx = np.argmin([ee[1] for ee in errs])
    qopt = (errs[best_idx][0]).canonical()
    vopt = errs[best_idx][1]

    return qopt, vopt

  def find_orientation_cal(self, ip_in, first=[]):
    '''Refine self.orientation over SO(3) with fixed intrinsic parameters.

    ip_in -- either (fd, distortion) or (midx, midy, fd, distortion).
    first -- unused; kept for interface compatibility.

    Returns (best orientation found, its loss value). Assumes
    self.orientation holds the initial estimate (set by the caller).
    '''
    def loss_func(q, *ip):
      ## Fixed M-estimator parameters for this stage.
      pf = (1/.15,3,1.0)
      return self.fit_error(ip[0], ip[1], ip[2], ip[3], q, *pf)

    ## Set vector of internal parameters to be used.
    if len(ip_in) == 2:
      ip = np.array([self.middlex, self.middley, ip_in[0], ip_in[1]])
    elif len(ip_in) == 4:
      ip = np.array(ip_in)
    else:
      raise ValueError("Invalid number of internal parameters.")

    ss = SimplexSO3(loss_func, xini=self.orientation, params=ip)
    ss.reset_state(self.orientation.q)
    ss.opt_loop(10000, xtol=1e-10, ftol=1e-10)
    return ss.best, ss.fbest

  def _plot_dirs(self, ax, spacing, orientation, vdirs_func):
    '''Shared implementation of plot_vdirs and plot_equidistant_vdirs.

    Plots, over a regular grid of pixels with the given spacing, short
    segments along the three vanishing-point directions computed by
    vdirs_func. ax is a matplotlib axes, taken with "gca()".
    '''
    ## Segment half-length, as a [-1,+1] pair of offsets.
    qq = spacing*0.45*np.array([-1,+1])
    ## Grid origin, chosen so the image center falls on a grid point.
    bx = 0.+(self.Iwidth/2)%spacing
    by = 0.+(self.Iheight/2)%spacing
    qL = np.mgrid[bx:self.Iwidth:spacing,by:self.Iheight:spacing].T.reshape((-1,2))
    Nq = qL.shape[0]
    ## Vanishing-point direction (vx, vy) per label and grid point.
    vL = vdirs_func(self.middlex, self.middley, self.fd, self.distortion,
                    orientation.q, np.array(qL, dtype=np.float32))
    ## Assemble segment endpoints: (x0, x1, y0, y1) per label and point.
    LL = np.zeros((3,Nq,4))
    for lab in range(3):
      for num in range(Nq):
        vx,vy = vL[lab,num]
        k,j = qL[num]
        LL[lab,num,:] = np.r_[k+vx*qq, j+vy*qq]
    for lab in range(3):
      ax.plot( LL[lab,:,:2].T, LL[lab,:,2:].T, dir_colors[lab], lw=2)

  def plot_vdirs(self, ax, spacing, orientation):
    '''Plot perspective (Harris model) vanishing-point directions at
    various pixels. ax is a matplotlib axes, taken with "gca()"; spacing
    is the separation between the points.'''
    self._plot_dirs(ax, spacing, orientation, harris_vdirs)

  def plot_equidistant_vdirs(self, ax, spacing, orientation):
    '''Plot equidistant-model vanishing-point directions at various
    pixels. Same arguments as plot_vdirs.'''
    self._plot_dirs(ax, spacing, orientation, equidistant_vdirs)

  def calculate_edgel_normals(self):
    '''Calculate the normal vector of each edgel, given the intrinsic
    parameters (and distortion), via the camori_aux extension.'''
    self.normals = harris_normals(self.middlex, self.middley, self.fd, self.distortion,
                                        self.edgels)

  def calculate_rectified_observations(self, gain):
    '''Map each labeled edgel to spherical coordinates in the world frame,
    collect color descriptors, and build one KD-tree per direction.

    gain -- multiplier applied to the azimuthal spherical coordinate.

    Fills self.i_edgels (image coords), self.o_edgels (intrinsic coords),
    self.s_edgels (spherical coords), self.descriptors and self.trees,
    each indexed by label 0-2. Assumes self.orientation, self.edgels,
    self.labels (and optionally self.new_labels) have been set.
    '''
    ## Rotation matrix of the camera orientation (transposed).
    rotM = self.orientation.rot().T
    self.i_edgels = [[],[],[]]
    self.o_edgels = [[],[],[]]
    self.s_edgels = [[],[],[]]
    self.descriptors = [[],[],[]]

    ## Prefer the re-estimated labels when available.
    labs = self.new_labels if self.new_labels != [] else self.labels

    ## Loop through each edgel in each direction (label).
    for k,j,edx,edy,lab in np.c_[self.edgels, labs]:
      lab=int(lab)
      ## If label is not one of the three "valid" directions, discard
      ## this edgel.
      if lab>=3:
        continue

      ## k and j are the image coordinates. We first calculate the
      ## intrinsic coordinates. This should be more properly done with
      ## a full transformation matrix, like the one OpenCV creates in
      ## the camera calibration procedure.
      x_c = (float(k) - self.middlex) / self.fd
      y_c = (float(j) - self.middley) / self.fd

      ## Store original image, and "intrinsic" coordinates of the edgel.
      self.i_edgels[lab].append([k, j, edx, edy])
      self.o_edgels[lab].append([x_c, y_c])

      ## Observation direction as a unit vector in camera coordinates.
      obs_c = np.array([ x_c, y_c, 1.0 ]) / np.sqrt(x_c**2 + y_c**2 + 1)

      ## Multiply observation direction vector with camera orientation
      ## matrix. This returns observation direction in world reference
      ## frame. From "camera-centric" to "world-centric" coordinates.
      ## Attention: changing side of application inverts matrix...
      obs_w = np.dot(obs_c, rotM.T)

      ## Calculate the spherical coordinates in different frames,
      ## according to edge direction.
      if lab == 0:
        ed_s = np.array([gain*np.arctan2(obs_w[2], obs_w[1]), np.arcsin(obs_w[0])])
      elif lab == 1:
        ed_s = np.array([gain*np.arctan2(obs_w[0], obs_w[2]), np.arcsin(obs_w[1])])
      elif lab == 2:
        ed_s = np.array([gain*np.arctan2(obs_w[1], obs_w[0]), np.arcsin(obs_w[2])])
      self.s_edgels[lab].append(ed_s)

      #########################################################
      ## Fetch color descriptors.

      ## Get the derivative direction (dx,dy) from estimated camera
      ## direction instead of original gradient measurement...
      vpx,vpy = self.vp_dirs(rotM[lab], x_c, y_c)
      dx=vpy
      dy=-vpx

      ## Sign with which to step along the gradient, per direction.
      if lab == 0:
        sentido = 2*(dy>0)-1
      elif lab == 1:
        sentido = 2*(dx>0)-1
      elif lab == 2:
        sentido = 2*( (np.abs(ed_s[0])<(np.pi/2)) * (dy>0) )-1

      ## Integer pixel coordinates for array indexing. One of j or k has
      ## an integer value; the other carries the sub-pixel fraction.
      ## int() truncates, matching NumPy's removed float-index behavior
      ## for these non-negative coordinates.
      ji = int(j)
      ki = int(k)

      ## Pick interpolated values. First check which coordinate is the
      ## "integer" one, then interpolate linearly along the other.
      if np.mod(j,1) == 0:
        c = np.mod(k,1)
        cm = self.frame[ji,ki] + c * (self.frame[ji,ki+1] - self.frame[ji,ki])
        gx = self.gradx[ji,ki] + c * (self.gradx[ji,ki+1] - self.gradx[ji,ki])
        gy = self.grady[ji,ki] + c * (self.grady[ji,ki+1] - self.grady[ji,ki])
      else:
        c = np.mod(j,1)
        cm = self.frame[ji,ki] + c * (self.frame[ji+1,ki] - self.frame[ji,ki])
        gx = self.gradx[ji,ki] + c * (self.gradx[ji+1,ki] - self.gradx[ji,ki])
        gy = self.grady[ji,ki] + c * (self.grady[ji+1,ki] - self.grady[ji,ki])

      ## Colors sampled on each side of the edge.
      crgb1 = cm - sentido * (dx * gx + dy * gy)
      crgb2 = cm + sentido * (dx * gx + dy * gy)

      ## Use the original RGB values as descriptors (an xyY variant via
      ## rgb_to_xyy was tried previously).
      self.descriptors[lab].append( np.r_[crgb1,crgb2] )
      ########################################################

    for lab in range(3):
      self.i_edgels[lab] = np.array(self.i_edgels[lab])
      self.o_edgels[lab]=np.array(self.o_edgels[lab])
      self.s_edgels[lab]=np.array(self.s_edgels[lab])
      self.descriptors[lab]=np.array(self.descriptors[lab])

    ## Create the KD-trees to query for points. One tree for each
    ## direction. Test the length explicitly: comparing a numpy array to
    ## [] does not produce a plain boolean.
    self.trees=[]
    for lab in range(3):
      if len(self.s_edgels[lab]) > 0:
        self.trees.append( scipy.spatial.cKDTree( self.s_edgels[lab] ) )
      else:
        self.trees.append([])

## Local variables:
## python-indent: 2
## end:
