import numpy as np
import scipy
import time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy.ndimage as ndi
import scipy.io
import sys

import scipy.linalg as la

# EM iteration limits: hard cap, and minimum before convergence tests apply
maxiteration = 500
miniterations = 50

# Per-iteration pose history filled in by probcombine: Rs[i]/ts[i] is the
# transform after i EM updates (slot 0 holds the initial guess)
Rs = np.zeros((maxiteration+1, 3, 3))
ts = np.zeros((maxiteration+1, 3))
D = 3 # 3d
# squared-distance cutoff (already scaled by 1/(2*sigma2)) used by rejectoutliers
outlierthreshold = 1 #3
# multiplicative shrink applied to sigma2 at the end of each EM iteration
annealrate = .93 #.99
# stop once sigma drops below this (only checked after miniterations)
minsigma = 0.0005

# this is the proportion expected to be outliers
uniformproportion = 1./ 5
# the density for the uniform distribution which generates outliers
uniformdensity = 1./ 8

# main function for combining two point clouds
def probcombine(X, Y, R=None, t=None, conf=1.):
  """Register cloud Y onto reference cloud X by annealed EM.

  X : (3, Nr) reference cloud
  Y : (3, Nd) cloud to be transformed
  R : optional (3, 3) initial rotation guess (default: identity)
  t : optional (3, 1) initial translation guess (default: zero)
  conf : confidence in the initial guess; shrinks the starting variance

  Returns the final (R, t). Side effect: records each iteration's pose in
  the module-level Rs/ts arrays and the objective in `objectives`.
  """
  global Rs, ts, objectives
  # Fresh defaults per call instead of mutable ndarray default arguments.
  if R is None:
    R = np.eye(3)
  if t is None:
    t = np.zeros((3, 1))
  Rs[0] = R
  ts[0] = t[:, 0]
  # Uniform initial responsibilities; Pi mixes in the outlier proportion.
  P = np.ones((Y.shape[1], X.shape[1])) / float(Y.shape[1])
  Pi = np.ones(Y.shape[1]) / (Y.shape[1]) * (1 - uniformproportion)
  # Data-driven starting variance, shrunk by the confidence in (R, t).
  sigma2 = findsigma2(X, Y, R, t) / (conf * conf)
  objectives = np.zeros(maxiteration)

  # Single-argument print(...) works under both Python 2 and Python 3.
  print('initial %f' % objfunc(X, Y, R, t, P, sigma2, Pi))
  for i in range(maxiteration):
    ######### E-step ##########
    Praw = findPraw(X, Y, R, t, sigma2, Pi)
    # Column-normalize against the Gaussian components plus the uniform
    # outlier component.
    P = Praw / (uniformdensity * uniformproportion + Praw.sum(0)[None, :])
    ####### End E-Step ########

    ######### M-Step ##########
    R, t = findRt(X, Y, P, sigma2)
    obm = objfunc(X, Y, R, t, P, sigma2, Pi)
    objectives[i] = obm
    Pi = np.ones(Y.shape[1]) / Y.shape[1]
    ####### End M-Step ########

    Rs[i+1, :, :] = R
    ts[i+1, :] = t[:, 0]
    sigma = np.sqrt(sigma2)
    # mean/median total responsibility per X column: low values suggest
    # many X points are unexplained by the Gaussian components.
    mean = P.sum(0).mean()
    median = np.median(P.sum(0))
    print('i=%d obM = %f sigma2 = %f sigma = %f mean = %f median = %f size = %d sumP = %f'
          % (i, obm, sigma2, sigma, mean, median, X.shape[1], P.sum()))
    if i > miniterations:
      # Stop once components are tight enough or coverage has collapsed.
      if sigma < minsigma or mean < .8:
        break

    # Anneal: shrink the variance to sharpen correspondences over time.
    sigma2 *= annealrate
  return R, t

def rejectoutliers(X, Y, R, t, sigma2):
  """Drop columns of X whose nearest transformed Y point is too far away.

  A column of X is kept when its smallest squared distance to any column
  of R*Y + t, scaled by 1/(2*sigma2), is below `outlierthreshold`. As a
  safety valve, X is returned untouched once it has already shrunk below
  a third of Y's size.
  Returns the filtered (3, Nkept) slice of X.
  """
  if X.shape[1] < Y.shape[1]/3: return X
  assert X.shape[0] == Y.shape[0] == R.shape[0] == t.size
  Ys = np.dot(R, Y) + t
  Nr = X.shape[1]; Nd = Y.shape[1]
  # scaled[r, c] = ||X[:, c] - Ys[:, r]||^2 / (2*sigma2)
  scaled = np.zeros((Nd, Nr))
  for r in range(Nd):
    diff = X - Ys[:, r:r+1]
    scaled[r, :] = (diff * diff).sum(0) / (2. * sigma2)

  Pmin = scaled.min(0)
  # Single-argument print(...) works under both Python 2 and Python 3.
  print(Pmin.max())
  accept = Pmin < outlierthreshold

  # keep only the columns of X that fall within the threshold
  return X[:, accept]
  

# Find the variance of the mixture components
def findsigma2(X, Y, R, t, approx=False):
  """Mean squared distance over all (X column, R*Y+t column) pairs.

  Used as the initial isotropic variance of the mixture components.
  approx=True (estimate from a subsample) is not implemented.
  """
  assert X.shape[0] == Y.shape[0] == R.shape[0] == t.size
  if approx:
    # The subsampled estimate was never written; the old code crashed with
    # a TypeError on np.random.permutation() before reaching its raise.
    raise NotImplementedError
  Ys = np.dot(R, Y) + t
  totalerr = 0.
  totalnum = X.shape[1] * Y.shape[1]
  for i in range(Y.shape[1]):
    diff = (X - Ys[:, i:i+1])
    sqerr = (diff * diff).sum(0)
    totalerr += sqerr.sum()
  return totalerr / totalnum
  
# E step: unnormalized responsibilities of each Y component for each X point.
def findPraw(X, Y, R, t, sigma2, Pi):
  """Return the (Nd, Nr) matrix of Gaussian densities scaled by Pi.

  Row r holds the isotropic Gaussian density of every X column around the
  transformed point (R*Y + t)[:, r]; rows are weighted by the mixing
  proportions Pi.
  """
  assert X.shape[0] == Y.shape[0] == R.shape[0] == t.size
  Ys = np.dot(R, Y) + t
  ncols = X.shape[1]
  nrows = Y.shape[1]

  # Normalization constant of the D-dimensional isotropic Gaussian,
  # hoisted out of the loop (same value every row).
  norm = (2 * np.pi * sigma2) ** (-D / 2.)
  Praw = np.zeros((nrows, ncols))
  for row in range(nrows):
    delta = X - Ys[:, row:row+1]
    Praw[row, :] = norm * np.exp(-(delta * delta).sum(0) / (2. * sigma2))

  # scale each row by its mixing weight
  return Praw * Pi[:, None]
  
# M step: solve for the rigid transform (R, t).
def findRt(X, Y, P, sigma2):
  """Delegate to the closed-form direct solver."""
  return findRtdirect(X, Y, P, sigma2)

def findRtdirect(X, Y, P, sigma2):
  """Closed-form weighted Procrustes solve for rotation R and translation t.

  X : (3, Nr) reference points
  Y : (3, Nd) moving points
  P : (Nd, Nr) responsibility weights
  sigma2 : unused; kept for the common findRt signature
  Returns (R, t) with R a proper rotation (det = +1) and t of shape (3, 1).
  """
  assert X.shape[0] == Y.shape[0]

  # Weighted centroids of the two clouds under P.
  S = P.sum()
  Sy = ((P.sum(1)[None, :] * Y).sum(1))[:, None] / S
  Sx = ((P.sum(0)[None, :] * X).sum(1))[:, None] / S

  # Centered coordinates.
  Xp = X - Sx
  Yp = Y - Sy

  # Weighted cross-covariance and its SVD (Kabsch / orthogonal Procrustes).
  Sxy = np.dot(np.dot(Xp, P.T), Yp.T)
  U, C, V = la.svd(Sxy)
  # Determinant correction: plain U*V can be a reflection (det = -1),
  # which is not a valid rigid rotation. Flip the last singular direction
  # when needed so det(R) = +1.
  d = np.sign(la.det(np.dot(U, V)))
  if d == 0:
    d = 1.0
  corr = np.ones(U.shape[1])
  corr[-1] = d
  R = np.dot(U * corr[None, :], V)
  t = Sx - np.dot(R, Sy)

  return R, t


def findRtdirect2(X,Y,P,sigma2):
  """Alternative M step: solve for R by weighted least squares.

  Unlike findRtdirect, R comes from la.lstsq and is NOT constrained to be
  orthogonal. sigma2 is unused; kept for the common findRt signature.
  Returns (R, t) with t of shape (3, 1).
  """
  assert X.shape[0] == Y.shape[0]
  Nr = X.shape[1]; Nd = Y.shape[1]
  
  # Solve Nr * t = coeffX3x1 - R coeffR3x1
  # Solve R (S*Syyt - np.dot(Sy, Sy.T)) = (Sxyt * S) - np.dot(Sx, Sy.T)
  # NOTE(review): S is taken as Nr here, while findRtdirect uses P.sum();
  # confirm which normalization is intended.
  S = Nr
  # Row-sum-weighted Y, transposed: tempPyt[r, :] = P.sum(1)[r] * Y[:, r]
  tempPyt = (P.sum(1)[None,:] * Y).T
  Syyt = np.dot(Y, tempPyt)
  # Weighted (unnormalized) sums of Y and X columns, as (3, 1) vectors.
  Sy = tempPyt.sum(0)[:,None]
  Sx = (P.sum(0)[None,:] * X).sum(1)[:,None]

  
  # Weighted cross term: Sxyt = sum_r (P[r, :] * X) Y[:, r]^T
  Sxyt = np.zeros((3,3))
  for r in range(Nd):
    Xscaled = P[r:r+1,:] * X
    term = (Xscaled[:,:,None] * Y[:,r][None,None,:]).sum(1)
    Sxyt += term

  A = S*Syyt - np.dot(Sy, Sy.T)
  B = Sxyt * S - np.dot(Sx, Sy.T)
  # now solve for RA = B or A'R' = B'

  soln = la.lstsq(A.T, B.T)
  # soln[2] is the rank of A.T; bound but unused (shadows the loop's r).
  R = soln[0].T; r = soln[2]
  
  t = (Sx - np.dot(R, Sy)) / S
  return R,t

def objfunc(X, Y, R, t, P, sigma2, Pi=None, bound=False):
  """Dispatch to the EM upper bound (bound=True) or the log-likelihood."""
  if bound:
    return upperbound(X, Y, R, t, P, sigma2)
  return likelihood(X, Y, R, t, P, sigma2, Pi)

def likelihood(X, Y, R, t, P, sigma2, Pi):
  """Log-likelihood of X under the Gaussian mixture plus uniform outliers.

  See Bishop p. 439 for the mixture likelihood formula. P is unused; kept
  for signature parity with upperbound/objfunc.
  """
  llh = 0
  Ys = np.dot(R, Y) + t
  for c in range(X.shape[1]):
    diff = Ys - X[:, c:c+1]
    # D/2. forces a float exponent: plain D/2 truncates to 1 under
    # Python 2, breaking the Gaussian normalization (findPraw already
    # uses the float form -D/2.).
    normal = np.exp(-(diff ** 2).sum(0) / (2 * sigma2)) / ((sigma2 * 2 * np.pi) ** (D / 2.))
    inside = (normal * Pi).sum() + uniformproportion * uniformdensity
    llh += np.log(inside)
  return llh

def upperbound(X, Y, R, t, P, sigma2):
  """EM upper bound: P-weighted sum of squared distances over 2*sigma2."""
  Ys = np.dot(R, Y) + t
  # (3, Nd, Nr) broadcasted differences between X columns and transformed
  # Y columns; summing axis 0 gives the (Nd, Nr) squared-distance table.
  pair = X[:, None, :] - Ys[:, :, None]
  sqdist = (pair * pair).sum(0)
  return (P * sqdist).sum() / (2 * sigma2)

# correct errors in homogeneous coordinates so the chained transforms
# compose back to the identity (the loop should come back full circle)
def makeprodidentity(Rs, ts, iterations=10):
  """Distribute the closure error of a transform chain over all links.

  Rs : (n, 3, 3) rotations; ts : (n, 3, 1) translations, in forward order
  (the chain is processed reversed internally). Each pass measures how far
  the chained product is from the identity, spreads an n-th of that
  correction onto every homogeneous transform, and re-orthonormalizes.
  Returns the corrected (Rs, ts), restored to the original order.
  """
  n = Rs.shape[0]
  Hs = np.zeros((n, 4, 4))
  Hs[:, 0:3, 0:3] = Rs[::-1]
  Hs[:, 0:3, 3:4] = ts[::-1]
  Hs[:, 3, 3] = 1.

  for it in range(iterations):  # `it`, not `iter`: avoid shadowing the builtin
    # Product of the whole chain; should be the identity.
    P = np.identity(4)
    for i in range(n):
      P = np.dot(Hs[i], P)

    # Single-argument print(...) works under both Python 2 and Python 3.
    print(P)
    print(la.norm(P - np.identity(4), ord=2))
    # P = H1 H2 H3..Hn should be identity
    # I =  H1 H2 ... Hn

    R = P
    L = np.identity(4)
    # in iteration i
    # we want I = E H0 H1 .. Hn
    # then H0-1 = E H1 H2 .. Hn
    # then H1-1 H0-1 = E H2 .Hn
    # L = correction R
    # Hi = nthroot(correction) Hi
    for i in range(n):
      correction = np.dot(L, la.inv(R))
      # First-order approximation of the n-th root of the correction.
      diff = (correction - np.identity(4)) / n
      invrootn = np.identity(4) + diff

      Hs[i] = np.dot(invrootn, Hs[i])
      Hs[i, 3, 0:3] = 0
      # NOTE(review): this SVD projects the full 4x4 (including the
      # translation column) onto the nearest orthogonal matrix — confirm
      # that perturbing the translation this way is intended.
      u, c, v = la.svd(Hs[i])
      Hs[i] = np.dot(u, v)

      R = np.dot(Hs[i], R)
      L = np.dot(Hs[i], L)

  retRs = Hs[::-1, 0:3, 0:3]
  retts = Hs[::-1, 0:3, 3:4]
  return retRs, retts
