#!/usr/bin/env python
# encoding: utf-8
"""
Reducer.py

This class allows reducing data sets from high to low dimensionality.

Created by Alakazam on 2008-06-30.
Copyright (c) 2008. All rights reserved.
"""

# Import statements
from numpy       import array,     concatenate, dot,   exp,  linalg, max, ones
from numpy       import repeat,    reshape,     shape, sign, sqrt,   sum
from numpy       import tile,      trace,       transpose,   zeros

from RandomArray import permutation, random
from hcluster    import pdist, squareform
from pca_module  import PCA_nipals_c
from scikits.ann import kdtree

import os.path
from sys     import path, stdout
path.append(os.path.dirname(__file__) + '/pysomap')
from pysomap import isodata

from Log            import Log

# Use psyco for jit compilation
# NOTE(review): psyco is a Python 2-only, long-unmaintained JIT; this import
# fails outright on Python 3 or on any interpreter without psyco installed.
import psyco
if __name__ == '__main__':
  # Only emit psyco's compilation log when run as a script, not on import.
  psyco.log()
# Request JIT compilation of as much code as possible (per psyco docs),
# regardless of whether this module is run directly or imported.
psyco.full()

class Reducer(object):
  """Reduce a data set from high to low dimensionality.

  The input is an (n, dim) array of points; doReduction() runs the
  algorithm selected at construction time and stores the reduced points
  in self.outData (and the k-nearest-neighbour indices in self.knn).
  """

  # Names accepted for the `algorithm` constructor argument; each one maps
  # to a do<Name>() method below.
  algorithms = ["None", "PCA", "Isomap", "DiffusionMaps", "MDS"]

  def __init__(self, data=None, algorithm="PCA", k=5, outDim=3, log=None):
    """Initialize options

    data:      (n, dim) array of input datapoints (required despite the
               None default)
    algorithm: reduction algorithm to use, one of Reducer.algorithms
    k:         number of neighbouring points to consider for nearest
               neighbour algorithms
    outDim:    target dimensionality for the reduction algorithm
    log:       if not None, object to use to log progress, otherwise a
               default verbose Log is created

    Raises NotImplementedError for an unknown algorithm, ValueError when
    data is missing or k >= n.
    """

    # Check for invalid option values
    if data is None:
      # Fail early with a clear message instead of an AttributeError on
      # data.shape below.
      raise ValueError('data must be an (n, dim) array of input datapoints')

    if algorithm not in Reducer.algorithms:
      raise NotImplementedError('This reduction algorithm has not been implemented')

    if k >= data.shape[0]:
      raise ValueError('k, number of nearest neighbours must be less than n, number of input datapoints')

    # Logging: fall back to a verbose logger when none is supplied
    if log is None:
      self.log = Log(verbosity=Log.VERBOSE)
    else:
      self.log = log

    self.log("Logging enabled...")

    self.data = data

    # Cache the input dimensions
    self.n, self.dim = self.data.shape

    # Reduction options
    self.algorithm = algorithm
    self.k         = k
    self.outDim    = outDim

  def doReduction(self):
    """Run the selected reduction and populate self.outData.

    Also fills self.knn with each point's k nearest neighbours when the
    algorithm itself did not compute them, then frees self.data.
    """
    self.knn = None

    self.log.start("Running reduction: %s" % self.algorithm)

    # Dispatch to doNone / doPCA / doIsomap / doDiffusionMaps / doMDS
    self.__getattribute__('do' + self.algorithm)()

    self.log.stop()

    # If the reduction algorithm hasn't calculated the nearest neighbours,
    # do it now
    if self.knn is None:
      self.log.start('Calculating nearest neighbours and distance matrix')

      self.knn, foo = kdtree(self.data).knn(self.data, self.k)

      self.log.stop()

    # The raw high-dimensional data is no longer needed once reduced
    del self.data

  def doNone(self):
    """Identity "reduction": keep the first outDim coordinates as-is."""

    self.outData = self.data[:, 0:self.outDim]

  def doPCA(self):
    """Do PCA reduction (NIPALS algorithm, C implementation)."""

    # Project the data onto its first outDim principal components
    out, P, E = PCA_nipals_c(self.data, standardize=False, E_matrices=True, PCs=self.outDim)

    self.outData = array(out)

  def doIsomap(self):
    """Do Isomap reduction using the pysomap library."""
    A = isodata()
    A.load_isodata(self.data)
    # K-neighbourhood variant; NOTE(review): `e` looks unused for
    # isomap_type="K" but is passed anyway — confirm against pysomap.
    A.reduce_isodata(isomap_type="K", e=5.0, K=self.k, O=self.outDim)

    # Pad the result with a zero column so outData always has >= 3 columns
    # like the other algorithms — TODO confirm this is the intent.
    self.outData = concatenate((A.outdata, zeros((self.n, 1))), 1)

  def doDiffusionMaps(self):
    """Do Diffusion Maps reduction

       sigma is the diffusion kernel radius (set to the mean of the
       n*k nearest-neighbour distances)

       Alpha = 0   is the Graph Laplacian
       Alpha = 0.5 is the Fokker-Plank propagator
       Alpha = 1   is the Laplace-Beltrami operator
    """

    # Nearest neighbours double as the kernel-radius estimate below
    self.knn, distKNN = kdtree(self.data).knn(self.data, self.k)

    self.sigma = sum(distKNN) / (self.k * self.n)

    self.alpha = 0
    self.t     = 3

    # Gaussian kernel over the full pairwise-distance matrix
    D = squareform(pdist(self.data))
    K = exp(-(D / self.sigma) ** 2)

    # Row sums (degree) of the kernel matrix
    p = sum(K, 1)

    # NOTE(review): transpose() of a 1-D array is a no-op, so
    # p * transpose(p) is an elementwise square, not the outer product a
    # diffusion-map normalization usually calls for — confirm intent.
    K1 = K / ((p * transpose(p)) ** self.alpha)

    v = sqrt(sum(K1, 1))

    # Same 1-D transpose caveat as above applies to v * transpose(v).
    A = K1 / (v * transpose(v))

    # Compute Markov transition matrix for t timesteps
    while self.t > 1:
      A = dot(A, A)
      self.t -= 1

    (U, S, V) = linalg.svd(A)

    # Normalize by the first singular vector; NOTE(review): broadcasting
    # here scales columns, whereas the Matlab idiom U./(U(:,1)*ones(1,n))
    # scales rows — confirm this port is correct.
    U = U / (U[:, 0] * ones((1, shape(U)[0])))

    # Discard the trivial first component, keep the next outDim ones
    self.outData = U[:, 1:(self.outDim + 1)]


  def D(self):
    """Return the full (n, n) Euclidean pairwise-distance matrix."""
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed matrix-wide
    t = tile(sum(self.data * self.data, 1), (self.n, 1))

    return sqrt(t + transpose(t) - 2 * dot(self.data, transpose(self.data)))

  def doMDS(self):
    """Do MDS reduction

    Implementation based on the
    Matlab Toolbox for Dimensionality Reduction v0.4b
    > http://www.cs.unimaas.nl/l.vandermaaten

    (C) Laurens van der Maaten Maastricht University, 2007
    """
    # Initialise variables
    # Number of iterations
    iterations = 3
    # Learning rate
    lr = 0.05
    # Metric (r = 2 is the Euclidean / Minkowski-2 metric)
    r = 2

    # Compute pairwise distance matrix
    self.log('Computing dissimilarity matrix...')

    distKNN = self.D()

    # Normalize distances
    distKNN = distKNN / max(distKNN)

    # Compute the variance of the distance matrix
    Dbar = (sum(distKNN) - trace(distKNN)) / self.n / (self.n - 1)
    temp = (distKNN - transpose(dot(Dbar, ones((1, self.n))))) ** 2
    varD = .5 * (sum(temp) - trace(temp))

    # Initialize the output with small random coordinates in [-.005, .005)
    self.outData = random((self.n, self.outDim)) * .01 - .005
    rinv = 1. / r

    # Iterate
    self.log('Running MDS...')
    for it in range(0, iterations):
      self.log('.', flush=True, newline=False)

      # Randomly permute the objects to determine the order in which they
      # are pinned for this iteration
      pinning_order = permutation(self.n)
      for j in range(0, self.n):
        m = pinning_order[j]

        # Move all of the others on each dimension according to the
        # learning rule

        # Displacements from the pinned point m to every other point
        pmat = (tile(self.outData[m], (self.n - 1, 1))
          - [self.outData[i] for i in range(0, self.n) if i != m])

        # Current low-dimensional distances to point m (Minkowski-r norm)
        dhdum = sum(abs(pmat) ** r, 1) ** rinv

        # Per-point step size: learning rate times the stress gradient
        dhmat = lr * transpose(tile(
            (dhdum - transpose([distKNN[m, i] for i in range(0, self.n) if i != m]))
              * (dhdum ** (1 - r)),
          (self.outDim, 1)))

        out = dhmat * abs(pmat) ** (r - 1) * sign(pmat)

        # Apply the update to every point except the pinned one
        self.outData = array([(
          self.outData[i] + (out[i] if i < m else out[i - 1])
            if i != m else self.outData[i]) for i in range(0, self.n)])
