#!/usr/bin/python
#coding:utf-8

# Copyright 2011 Nicolau Leal Werneck, Anna Helena Reali Costa and
# Universidade de São Paulo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


###############################################################################
## Trifocal 2D camera location estimation from coplanar points. The
## inputs to this program are the files produced by extract_edgels.


from __future__ import print_function

import sys

import time

import matplotlib
#matplotlib.use('Agg') 

if __name__ == '__main__':
    # When the script is invoked through a name ending in "-nox.py",
    # switch to the non-interactive Agg backend *before* pylab is
    # imported below, so the program can run without a display (no X11).
    if sys.argv[0][-7:] == '-nox.py':
        matplotlib.use('Agg')

from pylab import *

import scipy.io

from numpy import dot

from scipy.optimize import fmin

from camori import EdgelSet, PicturePinhole, quaternion_to_matrix, measure_error, quaternion_product, dir_colors, color_distance

import simplejson

import Image

set_printoptions(precision=3)

def pplot(x, *args, **kwargs):
    """Plot a single 2D point (1-D array) or one marker per row of a 2-D array.

    Extra positional/keyword arguments are forwarded to pylab's plot().
    """
    if x.ndim > 1:
        # One point per row: column 0 is the abscissa, column 1 the ordinate.
        plot(x[:, 0], x[:, 1], *args, **kwargs)
    else:
        # A single point given as a flat array.
        plot(x[0], x[1], *args, **kwargs)

def target_f(x, M, k):
    """Objective function for fmin: the k-th smallest absolute residual.

    Each row of M holds the coefficients of one linear constraint on the
    motion parameters; the residual of row i is |M[i,:-1] . x - M[i,-1]|.
    Minimizing the k-th smallest residual (instead of e.g. the sum) makes
    the estimate robust to the outlier rows beyond rank k.

    Parameters:
        x : parameter vector being optimized (length M.shape[1]-1).
        M : constraint matrix, last column is the right-hand side.
        k : rank (0-based) of the residual to return.

    Returns the k-th smallest absolute residual (a scalar).
    """
    res = np.abs(dot(M[:, :-1], x) - M[:, -1])
    # np.partition selects the k-th order statistic in O(n), which gives
    # exactly the same value as sorting the residuals and indexing [k].
    return np.partition(res, k)[k]

if __name__ == '__main__':

    ## Plot stuff immediately (matplotlib interactive mode).
    ion()

    rc('text',usetex=False)

    ## Avoid zero divide warnings...
    np.seterr(divide='ignore')

    ## Toggle for the diagnostic plots at the end of the run.
    #PlotStuff=False
    PlotStuff=True

    #################################################################
    ## Load image and initialize pic object

    ## Sets filename from input argument.  Expected usage:
    ##   program <job_file.json> <frame_number 1> <frame_number 2> <frame_number 3>
    if len(sys.argv) < 5:
        print(sys.argv[0], '<job_file.json> <frame_number 1 2 3>')
        raise Exception('Insufficient number of parameters')

    ## Read the job parameters (directories, gains, matching thresholds).
    finput = open(sys.argv[1])
    job_params = simplejson.load(finput)
    finput.close()

    fileroot = job_params['root_directory']

    oldgain = job_params['extraction_gain']
    gain = job_params['matching_gain']

    ## Get matrix for color distance comparison.  Sinv is the inverse of
    ## the stored covariance, used by color_distance as a
    ## Mahalanobis-style metric on the edgel color descriptors.
    aa = np.load(fileroot +'/covs.npz')
    Mcov = aa['pcov']
    Sinv = inv(Mcov)

    ## Load edgels of the three frames: Ia and Ib are the outer frames,
    ## Io is the middle (reference) frame.
    filename = fileroot+'/edgels2/edgels-%04d.npz'%(int(sys.argv[2]))
    Ia = EdgelSet(filename, oldgain, gain)

    filename = fileroot+'/edgels2/edgels-%04d.npz'%(int(sys.argv[3]))
    Io = EdgelSet(filename, oldgain, gain)

    filename = fileroot+'/edgels2/edgels-%04d.npz'%(int(sys.argv[4]))
    Ib = EdgelSet(filename, oldgain, gain)



    ## Label, the direction we are using.
    lab = 1

    ## Pre-compute per-frame observations and point clusters for the
    ## chosen direction label.
    Io.calc_obs(lab)
    Ia.calc_obs(lab)
    Ib.calc_obs(lab)
    Io.cluster_points(lab)
    Ia.cluster_points(lab)
    Ib.cluster_points(lab)

    ###########################################################################
    ## Now we finally start the estimation work.

    cam=Io

    ## Select "viable" observations. Usually the ones more to the
    ## orthogonal directions of motion. I.e., avoid points that are
    ## too distant and will provide too little disparity.

    ## These first ones are for equirectangular, omnidirectional images.
    # viable_obs = [ x for x in cam.dr if x in cam.dl and (cam.dl[x][1]+cam.dr[x][1])>3
    #                and np.abs(cam.dl[x][1]-cam.dr[x][1]) <3 ]
    # viable_obs = [k for k in range(Io.obs.shape[0]) 
    #               if (np.abs(Io.s_edgels[1][k,0]) > pi/6 
    #                   and np.abs(Io.s_edgels[1][k,0])< 5*pi/6) ]

    # ## For normal perspective images (pcs02 dataset)
    # viable_obs = [ x for x in cam.dr if (x in cam.dl
    #                                      and (cam.dl[x][1]+cam.dr[x][1]) > 3
    #                                      and np.abs(cam.dl[x][1]-cam.dr[x][1]) < 100)]
    #                                      # and np.abs(Io.s_edgels[1][x,0]) < 1.4)]

    ## Just pick every point...
    viable_obs = [ x for x in range(Io.s_edgels[lab].shape[0])]

    ## This here would limit how many points from a same line... Only pick a few in the middle.
    # 

    # viable_obs = [k for k in range(Io.obs.shape[0]) if np.abs(Io.s_edgels[1][k,0]) < 1.4 ]

    ## M is the matrix that holds the coefficients calculated for each
    ## point.  It is sized for the worst case of nene*nene candidate
    ## match pairs per observation; only the first Mk rows get filled.
    nene = job_params['nearest_neighbors']
    max_dist = job_params['matching_distance']
    color_lim = job_params['color_threshold']**2
    M = zeros((nene**2*len(viable_obs), 4))
    Mk=0

    ## Accepted (ind_a, ind_o, ind_b) match triples, kept for plotting.
    matches = []

    for ind_o in viable_obs:
        print('###', ind_o)
        
        ## Observation in spherical coordinates, to look for matches.
        to = Io.s_edgels[lab][ind_o]

        ## The observation unit vector on the plane
        po = Io.obs[ind_o]

        ## Query the kd-trees of the outer frames for the nene nearest
        ## neighbors of this observation, within max_dist.
        iia = Ia.trees[lab].query(to, nene, 0, 2, max_dist)[1]
        iib = Ib.trees[lab].query(to, nene, 0, 2, max_dist)[1]

        ## Keep only real neighbors (index == tree.n marks a missing
        ## neighbor slot) whose color descriptor is close enough to the
        ## reference observation's.
        iiia = [i for i in iia if i != Ia.trees[lab].n and \
                    color_distance(Sinv, Io.descriptors[lab][ind_o],
                                   Ia.descriptors[lab][i]) < color_lim]
        iiib = [i for i in iib if i != Ib.trees[lab].n and \
                    color_distance(Sinv, Io.descriptors[lab][ind_o],
                                   Ib.descriptors[lab][i]) < color_lim]

        ## Enforce that camera is moving +- in straight line 
        # iiia = [i for i in iia if i != Ia.trees[lab].n and \
        #             color_distance(Sinv, Io.descriptors[lab][ind_o],
        #                            Ia.descriptors[lab][i]) < color_lim\
        #             and Io.s_edgels[lab][ind_o][0] > Ia.s_edgels[lab][i][0] ]
        # iiib = [i for i in iib if i != Ib.trees[lab].n and \
        #             color_distance(Sinv, Io.descriptors[lab][ind_o],
        #                            Ib.descriptors[lab][i]) < color_lim\
        #             and Io.s_edgels[lab][ind_o][0] < Ib.s_edgels[lab][i][0] ]


        # NOTE(review): 'keepon' is never read afterwards — apparently a
        # leftover from an earlier control-flow scheme.
        keepon = True

        ## Use only the best (nearest) surviving candidate in each of the
        ## two outer frames.
        if iiia and iiib:
            ind_a, ind_b = iiia[0], iiib[0]

            print(ind_o, ind_a, ind_b)
            print(Ia.descriptors[lab][ind_a])
            print(Io.descriptors[lab][ind_o])
            print(Ib.descriptors[lab][ind_b])

            matches.append((ind_a, ind_o, ind_b))

            pa = Ia.obs[ind_a]
            pb = Ib.obs[ind_b]
            print()

            ## Produce each of the matrix's lines.  eta_a / eta_b are the
            ## 2D cross products of the reference direction with the
            ## matched directions; the resulting row is one linear
            ## constraint on the camera motion parameters.
            eta_a = po[0] * pa[1] - po[1] * pa[0]
            eta_b = po[0] * pb[1] - po[1] * pb[0]
            M[Mk,0] =  pa[1] / eta_a
            M[Mk,1] = -pa[0] / eta_a
            M[Mk,2] = -pb[1] / eta_b
            M[Mk,3] =  pb[0] / eta_b

            Mk+=1


    ## Optimization to estimate camera motion: minimize the k-th smallest
    ## absolute residual of M (robust to outlier rows), starting from zero
    ## motion.
    w_ini = array([0,0,0])
    k_target = 100 ## The k-th error to minimize
    w_opt = fmin(target_f, w_ini, args=(M, k_target))
    print('solu:', w_opt)


    if PlotStuff:
        ## Plot all observations from all cameras (direction label 1),
        ## plus the accepted match triples as dotted line segments.
        figure(1)
        pplot(Ia.s_edgels[1], 'b+', mew=1)
        pplot(Io.s_edgels[1], 'g+', mew=1)
        pplot(Ib.s_edgels[1], 'r+', mew=1)
        pplot(cam.s_edgels[1][viable_obs],  'gx')

        for ind_a, ind_o, ind_b in matches:
            plot( [
                   Io.s_edgels[1][ind_o,0],
                   Ib.s_edgels[1][ind_b,0]],
                  [
                   Io.s_edgels[1][ind_o,1],
                   Ib.s_edgels[1][ind_b,1]],
                  'r:'                  )
            plot( [Ia.s_edgels[1][ind_a,0],
                   Io.s_edgels[1][ind_o,0],
                   ],
                  [Ia.s_edgels[1][ind_a,1],
                   Io.s_edgels[1][ind_o,1],
                   ],
                  'b:'                  )

        axis([-4,4,2,-2])
        #axis('equal')

        ## Plot all observations from all cameras
        figure(2)
        suptitle('Z')
        pplot(Ia.s_edgels[2], 'b+',mew=1)
        pplot(Io.s_edgels[2], 'g+',mew=1)
        pplot(Ib.s_edgels[2], 'r+',mew=1)

        axis([-4,4,2,-2])
        #axis('equal')

        ## Plot all observations from all cameras
        figure(3)
        suptitle('X')
        pplot(Ia.s_edgels[0], 'b+',mew=1)
        pplot(Io.s_edgels[0], 'g+',mew=1)
        pplot(Ib.s_edgels[0], 'r+',mew=1)

        axis([-4,4,2,-2])
        #axis('equal')




        ## Residuals of the estimated solution, sorted — useful to pick
        ## k_target.
        # figure(4)
        # res = np.abs( dot(M, r_[w_opt,1] ))
        # q = argsort(res)
        # semilogy(res[q], '-+')