/*

   **********************************************************************
   *                                                                    *
   * This file contains the code for an image object for a content-     *
   * based image retrieval application that "summarizes" an image and   *
   * sends the resulting image "signature" to a server for              *
   * classification. The server returns the classification result in    *
   * the form of a Web page containing example images of the input      *
   * image's category.                                                  *
   *                                                                    *
   * The image class defined here is rather specific to this particular *
   * application and is not really designed for broad reuse (just yet). *
   *                                                                    *
   **********************************************************************
*/

/*
   **********************************************************************
   *                                                                    *
   * Gist   -- content-based image retrieval application for the gPhone *
   *                                                                    *
   * Copyright (C) 2011 Yong Zeng                                       *
   *                                                                    *
   * This file is part of Gist.                                       *
   *                                                                    *
   * Gist is free software; you can redistribute it and/or modify it  *
   * under the terms of the GNU General Public License as published by  *
   * the Free Software Foundation; either version 2 of the License, or  *
   * (at your option) any later version.                                *
   *                                                                    *
   * Gist is distributed in the hope that it will be useful, but      *
   * WITHOUT ANY WARRANTY; without even the implied warranty of         *
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU   *
   * General Public License for more details.                           *
   *                                                                    *
   * You should have received a copy of the GNU General Public License  *
   * along with Gist; if not, write to the Free Software Foundation,    *
   * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.       *
   *                                                                    *
   **********************************************************************
*/

//----------------------- PACKAGE SPECIFICATION -------------------------

package vision.gistcomputing ;

//------------------------------ IMPORTS --------------------------------

// Android utilities
import android.util.Log;

// Java I/O
import java.io.StreamTokenizer;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.Reader;
import java.io.IOException;

// Java containers
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

//------------------------- CLASS DEFINITION ----------------------------

/**
   The BbofEstimator class implements the following paper within the
   Gist application's framework:

   Lazebnik, S., Schmid, C., Ponce, J.
   Beyond Bags of Features: Spatial Pyramid Matching for Recognizing
      Natural Scene Categories
   CVPR, 2006.

   In the paper, the authors describe the use of weak features (oriented
   edge points) and strong features (SIFT descriptors) as the basis for
   classifying images. In this implementation, however, we only concern
   ourselves with strong features clustered into 200 categories (i.e.,
   the vocabulary size is 200 "words" or "vis-terms"). Furthermore, we
   restrict the spatial pyramid used as part of the matching process to 2
   levels.

   We restrict ourselves to the above configuration because, as Lazebnik
   et al. report, it yields the best results (actually, a vocabulary
   size of 400 is better, but not by much).

   To compute the gist vector for an image, we first divide the image
   into 16x16 pixel patches and compute SIFT descriptors for each of
   these patches. We then assign these descriptors to bins corresponding
   to the nearest of the 200 SIFT descriptors (vocabulary) gleaned from
   the training phase. This grid of SIFT descriptor indices is then
   converted into a feature map that specifies the grid coordinates for
   each of the 200 feature types. This map allows us to compute the
   multi-level histograms as described in the paper. The gist vectors we
   are interested in are simply the concatenation of all the multi-level
   histograms into a flat array of numbers.

   Once we have these gist vectors, we can classify images using an SVM.
   The SVM kernel is the histogram intersection function, which takes the
   gist vectors for the input and training images and returns the sum of
   the minimums of each dimension (once again, see the paper for the gory
   details).

   This class, viz., BbofEstimator, only computes gist vectors (i.e.,
   normalized multi-level histograms) given the 200 "word" vocabulary of
   SIFT descriptors to serve as the bins for the histograms. The actual
   training and classification are performed on Gist's server side.
*/
class BbofEstimator extends GistEstimator {

/// As detailed in the Lazebnik paper, the features used can be either
/// weak (oriented edge points) or strong (SIFT descriptors). In this
/// implementation we concern ourselves with only strong features. By
/// default, these features are clustered into 200 categories, i.e., the
/// size of the feature vocabulary is 200 "words" or "vis-terms."
/// However, this class does support vocabularies of different sizes
/// (see reload_vocabulary()).
///
/// In the terminology of the paper, this quantity is referred to as the
/// number of channels, M.
private static int NUM_CHANNELS = 200 ;

/// A simple bag-of-features type of algorithm won't pay any attention to
/// the spatial relationships between occurrences of different features.
/// The BBoF algorithm, however, does take this into account by arranging
/// the features in a regular grid and then subdividing this grid into
/// finer grids along a spatial pyramid. Feature matches at higher
/// resolutions of the pyramid are given greater weight.
///
/// In this implementation we restrict the above-mentioned spatial
/// matching pyramid to 3 levels, viz., 0, 1 and 2. This setting
/// specifies the top level of the pyramid.
private static final int NUM_LEVELS = 2 ;

/// Size of the concatenated ("collapsed") spatial histogram for ONE
/// feature type, summed over ALL levels of the pyramid. At level l the
/// grid has 4^l cells, so the total is 4^0 + 4^1 + ... + 4^L, whose
/// closed form is (4^(L+1) - 1)/3. For a two-level pyramid this works
/// out to 1 + 4 + 16 = 21 numbers, which are "collapsed" into one long
/// (21-dimensional) vector per feature type.
///
/// NOTE(review): despite the name, this constant is the total across
/// all levels, not the size at any single level. The name is kept to
/// avoid churn.
private static final
int HISTOGRAM_SIZE_PER_LEVEL = ((1 << (2*NUM_LEVELS + 2)) - 1)/3 ;

/// The entire gist vector is constructed by stringing together the
/// collapsed spatial histograms of all feature types.
///
/// For a two-level pyramid and a vocabulary of 200 feature types, we
/// will get 4200-dimensional gist vectors.
private static
int GIST_VECTOR_SIZE = NUM_CHANNELS * HISTOGRAM_SIZE_PER_LEVEL ;

/// The vocabulary of "universal" or "prototypical" SIFT descriptors:
/// the centroids obtained by clustering the SIFT descriptors of all the
/// training images into NUM_CHANNELS clusters. Loaded lazily on first
/// instantiation; remains null if loading failed, in which case apply()
/// returns null.
private static SiftDescriptor[] m_vocabulary ;

//-------------------------- INITIALIZATION -----------------------------

/// A private constructor because gist estimators are created via an
/// object factory rather than directly by client modules.
///
/// The SIFT vocabulary is stored in a plain text file whose format is
/// as follows: it consists of NUM_CHANNELS lines, each holding the 128
/// floating point components of one SIFT descriptor obtained by
/// clustering all the SIFT descriptors associated with the training
/// set. The vocabulary is a result of the training process, which must
/// be carried out on the server side and then pushed on to the phone.
private BbofEstimator()
{
   if (m_vocabulary == null) // load lazily, once; shared by all instances
      m_vocabulary = load_vocabulary() ;
}

// Loads the SIFT vocabulary from the configured plain text file into
// memory.
//
// Returns the vocabulary array on success; null on I/O failure or if
// the file runs out of data before NUM_CHANNELS complete descriptors
// have been read.
//
// The reader is closed on every exit path via the finally block (the
// previous version leaked it on the premature-EOF return and on
// exceptions).
private SiftDescriptor[] load_vocabulary()
{
   Reader R = null ;
   try
   {
      SiftDescriptor[] vocabulary = new SiftDescriptor[NUM_CHANNELS] ;

      R = new BufferedReader(new FileReader(Settings.vocabulary_file())) ;
      StreamTokenizer tokens = new StreamTokenizer(R) ;
      tokens.parseNumbers() ;

      int[] sift = new int[SiftDescriptor.SIZE] ;
      int i = 0, s = 0 ; // descriptor index, component index
      boolean done = false ;
      while (!done)
      {
         tokens.nextToken() ;
         switch (tokens.ttype) {
            case StreamTokenizer.TT_EOF:
               done = true ;
               if (i != NUM_CHANNELS) // prematurely out of data!?!
                  return null ;
               break ;
            case StreamTokenizer.TT_NUMBER:
               sift[s++] = Utils.clamp_byte(Math.round(tokens.nval)) ;
               if (s == SiftDescriptor.SIZE) { // one full descriptor read
                  vocabulary[i++] = new SiftDescriptor(sift) ;
                  if (i == NUM_CHANNELS)
                     done = true ;
                  s = 0 ;
               }
               break ;
         }
      }

      return vocabulary ;
   }
   catch (IOException e)
   {
      Log.e("Goggle", "unable to load SIFT vocabulary! trouble ahead...") ;
      return null ;
   }
   finally
   {
      if (R != null)
         try { R.close() ; } catch (IOException ignored) { /* best effort */ }
   }
}

/// This method reloads the SIFT vocabulary from the specified file. In
/// order to avoid having to open the file and count the number of lines
/// in it to determine the size of the vocabulary, clients should also
/// supply the size of the vocabulary.
///
/// Sizes outside [5, 500] are silently rejected. If loading the new
/// vocabulary fails, the previous channel count and gist vector size
/// are restored so the estimator's static state stays self-consistent
/// (note that Settings.vocabulary_file() will still name the new file).
public void reload_vocabulary(int size, String file_name)
{
   if (size < 5 || size > 500) // some basic sanity checks
      return ;

   final int old_channels    = NUM_CHANNELS ;
   final int old_vector_size = GIST_VECTOR_SIZE ;

   NUM_CHANNELS     = size ;
   GIST_VECTOR_SIZE = size * HISTOGRAM_SIZE_PER_LEVEL ;

   Settings.vocabulary_file(file_name) ;
   m_vocabulary = load_vocabulary() ;
   if (m_vocabulary == null) { // roll back so the old sizes still apply
      NUM_CHANNELS     = old_channels ;
      GIST_VECTOR_SIZE = old_vector_size ;
   }
}

//----------------- GIST VECTOR COMPUTATION ALGORITHM -------------------

/// This method applies the steps of the Lazebnik algorithm to the input
/// image:
///   1. compute the strong features, i.e., SIFT descriptors laid out on
///      a regular grid superimposed on the input image;
///   2. compute a feature map telling us where on the grid each feature
///      type appears;
///   3. from the feature map, compute weighted histograms for each
///      level of the spatial matching pyramid and collapse them into
///      one single-dimensional vector;
///   4. normalize that vector and return it as the gist vector.
///
/// Returns null if the SIFT vocabulary could not be loaded.
public float[] apply(Image I)
{
   if (m_vocabulary == null) // vocabulary failed to load; cannot proceed
      return null ;

   BbofSift.Grid G = new BbofSift(I).apply() ;
   FeatureMap F = compute_feature_map(G) ;
   float[] gist_vector = compute_spatial_histogram(F, G) ;
   return Utils.normalize(gist_vector) ;
}

// Bins every grid cell's SIFT descriptor to its nearest vocabulary word
// and records, per word, the list of grid coordinates where it occurs.
private FeatureMap compute_feature_map(BbofSift.Grid G)
{
   final int W = G.width() ;
   final int H = G.height() ;

   FeatureMap F = new FeatureMap() ;
   for (int y = 0; y < H; ++y)
   for (int x = 0; x < W; ++x)
   {
      int bin = find_bin(G.get(x, y), m_vocabulary) ;
      FeatureMapEntry entry = F.find(bin) ;
      if (entry == null) // first occurrence of this feature type
         F.insert(bin, new FeatureMapEntry(new Point2D(x, y))) ;
      else
         entry.push_back(new Point2D(x, y)) ;
   }

   return F ;
}

// Returns the index of the vocabulary word nearest (in squared
// Euclidean distance) to descriptor S, or -1 if V is empty.
private int find_bin(SiftDescriptor S, SiftDescriptor[] V)
{
   int bin = -1 ;
   float min_dist = Float.MAX_VALUE ;

   for (int i = 0; i < V.length; ++i)
   {
      float d = dist2(S, V[i], min_dist) ;
      if (d < min_dist) {
         bin = i ;
         min_dist = d ;
      }
   }
   return bin ;
}

// Squared Euclidean distance between two descriptors with an early
// bail-out: once the partial sum exceeds min, returns min so that the
// caller's "d < min_dist" test fails cheaply.
private float dist2(SiftDescriptor L, SiftDescriptor R, float min)
{
   float d = 0 ;
   for (int i = 0; i < SiftDescriptor.SIZE; ++i) {
      final float diff = L.get(i) - R.get(i) ;
      d += diff * diff ; // cheaper than Math.pow(diff, 2) in this hot loop
      if (d > min)
         return min ;
   }
   return d ;
}

// Concatenates, for every feature type, the weighted spatial histograms
// of all pyramid levels into one flat gist vector. Feature types absent
// from the image contribute a run of zeros (the freshly allocated array
// is already zero-filled), so we simply advance the write cursor.
private float[] compute_spatial_histogram(FeatureMap F, BbofSift.Grid G)
{
   float[] gist = new float[GIST_VECTOR_SIZE] ;
   Image.Dims sift_grid_size = new Image.Dims(G.width(), G.height()) ;
   int target = 0 ; // write cursor into gist
   for (int m = 0; m < NUM_CHANNELS; ++m)
   {
      FeatureMapEntry entry = F.find(m) ;
      if (entry == null) // no instances of feature type m in input image
         target += HISTOGRAM_SIZE_PER_LEVEL ;
      else // count instances of feature type m at each level of spatial pyr.
      {
         List<Point2D> coords = entry.getList() ;
         for (int l = 0; l <= NUM_LEVELS; ++l) {
            Histogram h = spatial_counts_vector(coords, l, sift_grid_size) ;
            System.arraycopy(h.getArray(), 0, gist, target, h.getSize()) ;
            target += h.getSize() ;
         }
      }
   }

   return gist ;
}

// Builds the weighted 2^l x 2^l histogram of feature occurrences for
// pyramid level l. Level 0 is a single cell holding count/2^L, the
// level-0 weight prescribed by the Lazebnik paper.
Histogram
spatial_counts_vector(List<Point2D> coords, int l, Image.Dims source_grid_size)
{
   final int L = NUM_LEVELS ;
   final int N = 1 << l ; // grid is N x N at level l

   Histogram H = new Histogram(N, N) ;
   if (N == 1) // level 0: one cell, weighted by 1/2^L
      H.setVal(0, 0, (float) (coords.size() / Math.pow(2, L))) ;
   else
   {
      for (Point2D c : coords)
         H.update_spatial_histogram(c, source_grid_size) ;
      H.transform(l, L) ;
   }
   return H ;
}

/// A small 2-D grid of float counts used to build the per-level spatial
/// histograms. Static nested: it never touches the enclosing instance.
private static class Histogram {
   private final int w ;        // grid width in cells
   private final int h ;        // grid height in cells
   private final float[] grid ; // row-major cell values

   public Histogram(int w, int h) {
      this.w = w ;
      this.h = h ;
      grid = new float[w * h] ;
   }

   public int getWidth() {
      return w ;
   }

   public int getHeight() {
      return h ;
   }

   // Increment the cell containing grid point p.
   public void incCell(Point2D p) {
      grid[p.x + p.y*w]++ ;
   }

   public void setVal(int x, int y, float value) {
      grid[x + y*w] = value ;
   }

   // Map a SIFT-grid coordinate c into this histogram's coarser grid
   // (d gives the SIFT grid's dimensions) and bump that cell.
   public void update_spatial_histogram(Point2D c, Image.Dims d) {
      float Sx = d.width  / (float) w ;
      float Sy = d.height / (float) h ;
      incCell(new Point2D((int) (c.x / Sx), (int) (c.y / Sy))) ;
   }

   // Scale every cell by the weight for pyramid level `level` given top
   // level `top`: the divisor is 2^(level - top + 1). The parameters
   // were previously declared (L, l) but called with (l, L); they are
   // renamed here to match the call site, preserving the arithmetic.
   //
   // NOTE(review): the Lazebnik paper weights level l by 1/2^(L-l+1),
   // which only coincides with this divisor at the top level (l == L).
   // Verify against the server-side implementation before changing the
   // arithmetic on either side.
   public void transform(int level, int top) {
      final int divisor = 1 << (level - top + 1) ;
      for (int i = 0; i < h; ++i)
         for (int j = 0; j < w; ++j)
            grid[j + i*w] /= divisor ;
   }

   public float[] getArray() {
      return grid ;
   }

   public int getSize() {
      return w*h ;
   }
}

//--------------------------- FEATURE MAPS ------------------------------

/// Maps a vocabulary word index to the list of grid coordinates where
/// that word occurs. Static nested: no enclosing-instance state used.
private static class FeatureMap {
   private final Map<Integer,FeatureMapEntry> maps =
      new TreeMap<Integer,FeatureMapEntry>() ;

   // Returns the entry for the given word index, or null if the word
   // does not occur. (TreeMap.get already yields null for missing keys,
   // so the old containsKey/get double lookup was redundant.)
   public FeatureMapEntry find(int index) {
      return maps.get(index) ;
   }

   public void insert(int key, FeatureMapEntry value) {
      maps.put(key, value) ;
   }
}

/// The list of grid coordinates at which one feature type occurs.
static class FeatureMapEntry {
   private final List<Point2D> entryList = new ArrayList<Point2D>() ;

   public FeatureMapEntry() {
   }

   public FeatureMapEntry(Point2D coord) {
      entryList.add(coord) ;
   }

   public void push_back(Point2D coord) {
      entryList.add(coord) ;
   }

   public List<Point2D> getList() {
      return entryList ;
   }
}

/// Convenience class to hold x and y coord together in a single
/// structure.
public static class Point2D {
   public int x, y ;

   public Point2D(int x, int y) {
      this.x = x ;
      this.y = y ;
   }
}

//------------------------------ FACTORY --------------------------------

/// This inner class implements a factory for producing instances of its
/// enclosing GistEstimator.
private static class Factory implements GistEstimator.Factory {

/// All gist estimator sub-factories must override this method and return
/// a concrete instance of their associated estimators.
public GistEstimator create()
{
   return new BbofEstimator() ;
}

} // end of inner class BbofEstimator.Factory

// This static block ensures that the above factory gets registered with
// the GistEstimator registry. All concrete gist estimators must provide
// such a section.
static
{
   GistEstimator.register("BbofEstimator", new Factory()) ;
}

//-----------------------------------------------------------------------

}
