#!/usr/bin/python
#coding:utf-8

# Copyright 2011 Nicolau Leal Werneck, Anna Helena Reali Costa and
# Universidade de São Paulo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


###############################################################################
## Trifocal 2D camera location estimation from coplanar points. The
## input to this program are files produced by extract_edgels.


from __future__ import print_function

import sys
import time
import matplotlib
#matplotlib.use('Agg') 

if __name__ == '__main__':
    # When the script is invoked through a '*-nox.py' symlink/copy, switch
    # matplotlib to the headless 'Agg' backend *before* pylab is imported below
    # (the backend cannot be changed after the first figure is created).
    if sys.argv[0][-7:] == '-nox.py':
        matplotlib.use('Agg') 

from pylab import *
import scipy.io
from numpy import dot
from scipy.optimize import fmin_cg, fmin_powell, fmin
from camori import EdgelSet, PicturePinhole, quaternion_to_matrix, measure_error, quaternion_product, dir_colors, color_distance
from camori_aux import find_line_err,  calculate_transformed_coordinates
import simplejson
import Image

set_printoptions(precision=3)

def pplot(x, *a, **kw):
    """Plot a point set with pylab.plot.

    Accepts either an (N, 2) array of points (plotted as column 0 vs
    column 1) or a single flat (x, y) pair. Extra positional/keyword
    arguments are forwarded to plot() unchanged.
    """
    if x.ndim > 1:
        plot(x[:, 0], x[:, 1], *a, **kw)
    else:
        plot(x[0], x[1], *a, **kw)

def params_err(x, lines):
    """Total line-straightness error for a full camera parameter vector.

    x packs [pp_x, pp_y, focal, k_dist*1e10, q0, q1, q2]: principal
    point, focal distance, a scaled radial-distortion coefficient and a
    3-parameter orientation. `lines` holds, per direction label 0..2,
    the extracted edgel chains; each chain is scored against the
    vanishing point of its label after undistortion.
    """
    principal = array(x[:2], dtype='float32')
    focal = x[2]
    distortion = x[3] * 1e-10  # undo the scaling applied when packing x
    quat = x[4:7]

    rot = quaternion_to_matrix(quat)

    # Vanishing points: perspective projection of the three axis directions.
    vps = array(focal * (rot[:, :2] / rot[:, [2, 2]]), dtype='float32')

    # Reusable scratch buffer for the transformed (undistorted) coordinates.
    workspace = zeros((1000, 2), dtype='float32')
    total = 0.0
    for lab in range(3):
        for chain in lines[lab]:
            calculate_transformed_coordinates(workspace, chain,
                                              principal, vps[lab], distortion)
            total += find_line_err(workspace, chain.shape[0])
    return total

def all_errs(x, iparm, all_lines):
    """Objective for the orientation search: error of params_err with the
    intrinsic parameters `iparm` held fixed and `x` the 3-vector orientation."""
    full_params = r_[iparm, x]
    return params_err(full_params, all_lines)

def find_orientation(x0, iparm, all_lines):
    """Minimize all_errs over the 3-parameter orientation with Powell's
    derivative-free method, starting from x0 and keeping `iparm` fixed.

    Returns the optimized orientation vector.
    """
    assert x0.shape[0] == 3
    best = fmin_powell(all_errs, x0, args=(iparm, all_lines), disp=False)
    return best


if __name__ == '__main__':

    ## Plot stuff immediately (interactive pylab mode)
    ion()

    rc('text',usetex=False)

    ## Avoid zero-divide warnings...
    np.seterr(divide='ignore')

    # NOTE(review): PlotStuff is True here AND re-set to True in the '-nox.py'
    # branch below, so the branch is a no-op; presumably one of the two was
    # meant to be False — confirm intended headless behavior.
    #PlotStuff=False
    PlotStuff=True
    if sys.argv[0][-7:] == '-nox.py':
        PlotStuff = True



    #################################################################
    ## Load image and initialize pic object

    ## Sets filename from input argument
    if len(sys.argv) < 2:
        print(sys.argv[0], '<job_file.json> [single frame number]')
        raise Exception('Insufficient number of parameters')

    # Job description: a JSON file with paths, intrinsics and frame range.
    finput = open(sys.argv[1])
    job_params = simplejson.load(finput)
    finput.close()

    fileroot = job_params['root_directory']

    # Gains used when edgels were extracted vs. matched.
    oldgain = job_params['extraction_gain']
    gain = job_params['matching_gain']


    iparm = zeros(4, dtype='float32')

    # Camera intrinsics: principal point, focal distance, radial distortion.
    pp = array(job_params['principal_point'])
    fd = job_params['focal_distance']
    k_dist = job_params['distortion_coefficient']

    # Fixed intrinsic-parameter vector passed to the orientation optimizer
    # (note: overwrites the zeros(4) placeholder above).
    iparm = r_[pp, fd, k_dist]

    # Optional second CLI argument restricts processing to a single frame.
    if len(sys.argv) > 2:
        the_frms = [int(sys.argv[2])]
    else:
        the_frms = range(job_params['first_frame'], 1+job_params['last_frame']) 

    for fk,frm in enumerate(the_frms):
        ## Load edgels produced by extract_edgels for this frame
        filename = fileroot+'/edgels/edgels-%04d.npz'%(frm)

        Ia = EdgelSet(filename, oldgain, gain)
        ##########
        ### Just to load the image, what a waste
        filename = fileroot+'/frames/'+job_params['filename_format']%frm
        im = Image.open(filename)
        frame = array(im.convert('RGB'), dtype=float)
        imr = array(im.convert('RGB'), dtype=float)
        imr = imr[:,:,:3] #remove alpha channel
        # Smooth out (optional per-channel Gaussian blur from the job file)
        if ("gaussian_smoothing_factor" in job_params.keys() and 
            job_params["gaussian_smoothing_factor"] > 0):
            for c in range(3):
                imr[:,:,c] = scipy.ndimage.gaussian_filter( imr[:,:,c], double(job_params["gaussian_smoothing_factor"]))
        ## Rescales image
        # NOTE(review): shape[0]/1 is Python-2 integer division and the zoom
        # factor is 1.0, so this currently copies at original size; the /1 and
        # zoom=1.0 look like leftovers of an adjustable rescale factor.
        zzz = zeros( (imr.shape[0]/1,imr.shape[1]/1,3) )
        for chan in range(3):
            zzz[:,:,chan]=scipy.ndimage.interpolation.zoom(imr[:,:,chan], 1.0)
        ## Creates picture object
        pic = PicturePinhole(zzz, fd, pp[0], pp[1])
        #################

        # Hand the camera intrinsics to the edgel set.
        Ia.middlex = pp[0]
        Ia.middley = pp[1]
        Ia.fd = fd
        Ia.k_dist = k_dist


        # Initial orientation guess comes from the loaded edgel file.
        x0 = Ia.orientation

        ## Pick list of "good" lines
        Ia.extract_lines(job_params['line_extraction_fraction'])
        
        # Powell optimization of the 3-parameter orientation (intrinsics fixed).
        xopt = find_orientation(x0, iparm, Ia.all_lines)
        Ia.orientation = xopt
        print('%d %f %f %f'%(frm, Ia.orientation[0], Ia.orientation[1], Ia.orientation[2]))

        dirs = quaternion_to_matrix(Ia.orientation)

        # Re-rectify and re-extract lines with the matching-stage settings.
        Ia.calculate_rectified_observations(job_params['matching_gain'])
        Ia.extract_lines(job_params['line_extraction_fraction_match'])


        if PlotStuff:
            figure(20, figsize=(12.8,7.2))
            suptitle('[StreetSLAM] Orientation estimation and edge extraction', fontweight='bold', fontsize=20)
            subplot(2,2,1)

            # NOTE(review): '0.75++' parses as 0.75 + (+frame/1024.) — works,
            # but the double operator is probably a typo for a single '+'.
            imshow(0.75++pic.frame/1024., interpolation='nearest')
            axis([-50,1050,800,-50])

            # Extracted lines over the image, colored by direction label.
            subplot(2,2,1)
            for lab in range(3):
                for ll in Ia.all_lines[lab]:
                    pplot( ll, '-', color=dir_colors[lab], lw=1)

            # One subplot per direction label with the rectified lines.
            for lab in range(3):
                for ll in Ia.s_all_lines[lab]:
                    subplot(2,2,lab+2)
                    pplot( ll * array([gain**-1,1]), '-', color=dir_colors[lab], lw=1)
                    #axis([-.65, .65, .5, -.5])
                    axis([-pi, pi, pi/2, -pi/2])

            # for lab in range(3):
            #     subplot(2,2,lab+2)
            #     axis('equal')

            # Per-label axis windows (hand-tuned viewing ranges).
            for lab in range(3):
                subplot(2,2,lab+2)
                if lab==0:
                    #axis([0,pi,pi/2,0])
                    axis([pi/4,pi-pi/4,pi/4,0])
                elif lab==1:
                    axis([0, pi/2,pi/8,-pi/8])
                elif lab==2:
                    # axis([-pi/2,pi/2,pi/2,0])
                    axis([-pi/4,pi/4,pi/4,0])

            savefig(fileroot+'/movie2/fff-%04d.png'%frm, dpi=100)


            ## Plot estimated vanishing point directions
            figure(24, figsize=(12,9))

            # NOTE(review): 'SteetSLAM' in the title string is likely a typo
            # for 'StreetSLAM' (cf. the suptitle above) — runtime string, left
            # untouched here.
            title('[SteetSLAM] Predicted edge directions at multiple points')
            imshow(pic.frame/260., interpolation='nearest', extent = (-1000/2,1000/2,750/2,-750/2,))
            pic.plot_vdirs(gca(), 36, quaternion_to_matrix(Ia.orientation))
            axis('equal')
            savefig(fileroot+'/movie2/vps-%04d.png'%frm, dpi=150)


        # Persist the optimized orientation plus the per-label edgel arrays.
        # NOTE(review): .npz is a binary format; mode 'w' (text) works on
        # POSIX but would corrupt the file on Windows — should be 'wb'.
        outfile = open(fileroot+'/edgels2/edgels-%04d.npz'%frm, 'w')
        savez(outfile,
              xopt = xopt,
              i_edgels0 = Ia.i_edgels[0], 
              i_edgels1 = Ia.i_edgels[1], 
              i_edgels2 = Ia.i_edgels[2], 
              o_edgels0 = Ia.o_edgels[0], 
              s_edgels0 = Ia.s_edgels[0], 
              descriptors0 = Ia.descriptors[0], 
              o_edgels1 = Ia.o_edgels[1], 
              s_edgels1 = Ia.s_edgels[1], 
              descriptors1 = Ia.descriptors[1], 
              o_edgels2 = Ia.o_edgels[2], 
              s_edgels2 = Ia.s_edgels[2], 
              descriptors2 = Ia.descriptors[2], 
              arnl = Ia.arnl, 
              arnr = Ia.arnr
              )
        outfile.close()



    # Disabled diagnostic plotting: lines and vanishing points in image space.
    if False: #PlotStuff:
        ## Image center, and estimated principal point
        plot([500], [375], 'ko')
        plot(pp[0], pp[1], 'bd')

        plot([0,0,1000,1000,0], [0,750,750,0,0], 'k-')

        ylim(1000,-250)
        xlim(-500,1500)
        axis('equal')

        ## Plot extracted lines in image space
        figure(2)
        ylim(1000,-250)
        xlim(-500,1500)
        axis('equal')

        plot([0], [0], 'bd')

        # Image border polyline, 1000 samples per side, counter-clockwise.
        borda = zeros((4000,2), dtype='float32')
        borda[:1000,0] = 0
        borda[:1000,1] = 750.*mgrid[0:1000]/1000.
        borda[1000:2000,0] = mgrid[0:1000]
        borda[1000:2000,1] = 750
        borda[2000:3000,0] = 1000
        borda[2000:3000,1] = 750. * (1000-mgrid[0:1000])/1000.
        borda[3000:4000,0] = 1000-mgrid[0:1000]
        borda[3000:4000,1] = 0

        b2 = zeros((4000,2), dtype='float32')
        
        # Undistorted image border.
        calculate_transformed_coordinates(b2, borda,
                                          array(pp, dtype='float32'),
                                          zeros(2, dtype='float32'), k_dist*1e-10)
        
        pplot(b2, 'k-')

        # Vanishing points from the estimated orientation (same formula as
        # in params_err).
        vps = array(fd * (dirs[:,:2] / dirs[:,[2,2]]), dtype='float32')
        ll = zeros((1000,2), dtype='float32')
        for lab in range(3):
            plot(vps[lab,0], vps[lab,1], 'd', color=dir_colors[lab])

            for oll in Ia.all_lines[lab]:
                calculate_transformed_coordinates(ll, oll,
                                                  array(pp, dtype='float32'),
                                                  zeros(2, dtype='float32'), k_dist*1e-10)
                Np = oll.shape[0]
                pplot(ll[:Np], '-+', color=dir_colors[lab])

                # Least-squares line through the undistorted edgels relative
                # to the vanishing point: normal AB from the smallest singular
                # vector, offset C so the line passes through the VP.
                AB = svd(ll[:Np]-vps[lab])[-1][-1]
                C = dot(AB, -vps[lab])
                # Parameterize by the better-conditioned coordinate to avoid
                # dividing by a near-zero normal component.
                if np.abs(AB[0]) < np.abs(AB[1]):
                    p1 = array([-2000, -(-2000*AB[0] + C)/AB[1] ])
                    p2 = array([ 3000, -( 3000*AB[0] + C)/AB[1] ])
                else:
                    p1 = array([-(-2000*AB[1] + C)/AB[0],-2000 ])
                    p2 = array([-( 3000*AB[1] + C)/AB[0], 3000 ])

                plot([p1[0], p2[0]], [p1[1],p2[1]], '--', alpha=0.5, color=dir_colors[lab])



