// Accelerated Dual Decomposition - An implementation of a MAP inference
// algorithm. See ai.stanford.edu/~vjojic/fastdd.pdf
//
// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Authors: nadavs@google.com (Nadav Samet)
//          noam.levy1@stanford.edu (Noam Levy)
//
// Implementation of the AcceleratedDD algorithm.

#include "AcceleratedDD.h"

#include <limits>
#include <algorithm>
#include <iostream>
#include <math.h>
#include <string.h>

using namespace std;

namespace AcceleratedDD {

  // Destination for LOG output; logging is disabled while this is NULL.
  static ostream *g_logstream = NULL;
  // Minimum severity that LOG will actually write to g_logstream.
  static LOGLEVEL g_loglevel = INFO;

  // Return the larger of a and b (returns b when a > b is false, e.g. ties).
  inline double dmax(double a, double b) {
    if (a > b) {
      return a;
    }
    return b;
  }

  // Set the stream that LOG(...) writes to; pass NULL to disable logging.
  void SetLogStream(ostream *logstream) {
    g_logstream = logstream;
  }

  // Set the minimum severity level that LOG(...) will emit.
  void SetLogLevel(LOGLEVEL loglevel) {
    g_loglevel = loglevel;
  }

  // Stream-style logging: LOG(level) << ... writes to g_logstream only when a
  // stream is installed and `level` is at or above the current g_loglevel.
  #define LOG(level) if ((g_logstream!=NULL) && (g_loglevel <= level)) (*g_logstream)

  // Build a table factor over `scope`, zero-initialized. The table holds one
  // entry per joint assignment of the scope's variables, so its size is the
  // product of their ranges.
  TableFactor::TableFactor(
      const Scope &scope,
      const OptimizationProblem *optimization_problem)
      : scope_(scope),
        optimization_problem_(optimization_problem) {
    state_size_ = 1;
    for (size_t i = 0; i < scope.size(); ++i) {
      // BUG FIX: variable_range is indexed by variable id (scope[i]), not by
      // the position i within the scope -- consistent with GetDimension(),
      // Evaluate(), Assign() and AllocateDualArray() in this file.
      state_size_ *= optimization_problem->variable_range[scope[i]];
    }
    // () value-initializes (zeroes) all entries.
    entries_ = new double[state_size_]();
  }

  // Release the entry table allocated in the constructor.
  TableFactor::~TableFactor() {
    delete[] entries_;
  }

  // The ordered list of variable ids this factor depends on.
  const Scope& TableFactor::GetScope() const {
    return scope_;
  }

  // Number of joint assignments of the scope variables (the table length).
  size_t TableFactor::GetStateSize() const {
    return state_size_;
  }

  // Total number of (variable, value) pairs this factor touches: the sum of
  // the ranges of all variables in its scope.
  size_t TableFactor::GetDimension() const {
    size_t total = 0;
    const size_t width = scope_.size();
    for (size_t pos = 0; pos != width; ++pos) {
      total += optimization_problem_->variable_range[scope_[pos]];
    }
    return total;
  }

  double TableFactor::Evaluate(const size_t *assignment) const {
    size_t index = assignment[scope_[0]];
    for (size_t i = 1; i < scope_.size(); ++i) {
      index *= optimization_problem_->variable_range[scope_[i]];
      index += assignment[scope_[i]];
    }
    return entries_[index];
  }

  void TableFactor::Assign(const size_t *scoped_assignment, double value) {
    size_t index = scoped_assignment[0];
    for (size_t i = 1; i < scope_.size(); ++i) {
      index *= optimization_problem_->variable_range[scope_[i]];
      index += scoped_assignment[i];
    }
    entries_[index] = value;
  }

  // Allocate a three-dimensional array appropriate for this problem: one
  // zero-initialized vector of duals per (factor, scope position) pair.
  // See DualArray.
  DualArray AllocateDualArray(const OptimizationProblem &p) {
    const size_t factors = p.factor_count();
    DualArray duals = new double**[factors];
    for (size_t f = 0; f < factors; ++f) {
      const Scope &scope = p.factors[f]->GetScope();
      const size_t width = scope.size();
      duals[f] = new double*[width];
      for (size_t pos = 0; pos < width; ++pos) {
        // () value-initializes the dual values to zero.
        duals[f][pos] = new double[p.variable_range[scope[pos]]]();
      }
    }
    return duals;
  }

  // Release every buffer allocated by AllocateDualArray for problem p.
  void DeleteDualArray(DualArray &array, const OptimizationProblem &p) {
    const size_t factors = p.factor_count();
    for (size_t f = 0; f < factors; ++f) {
      const size_t width = p.factors[f]->GetScope().size();
      for (size_t pos = 0; pos < width; ++pos) {
        delete[] array[f][pos];
      }
      delete[] array[f];
    }
    delete[] array;
  }

  // Free everything CreateInitialAlgorithmState allocated: the five dual
  // arrays, the per-factor s_c buffer, the presence lists, and finally the
  // state object itself. `p` is needed to recover the array shapes.
  void DeleteAlgorithmState(const OptimizationProblem &p,
                            AlgorithmState *state) {
    DeleteDualArray(state->p, p);
    DeleteDualArray(state->nu, p);
    DeleteDualArray(state->eta, p);
    DeleteDualArray(state->zeta1, p);
    DeleteDualArray(state->zeta2, p);
    delete[] state->s_c;
    delete state->presence_lists;
    delete state;
  }

  // Build the initial AlgorithmState for problem p: zeroed dual arrays,
  // theta = 1, iteration 0, and per-variable lists of the factors (and scope
  // positions) in which each variable appears.
  AlgorithmState *CreateInitialAlgorithmState(const OptimizationProblem &p) {
    AlgorithmState *state = new AlgorithmState;
    state->p = AllocateDualArray(p);
    state->nu = AllocateDualArray(p);
    state->eta = AllocateDualArray(p);
    state->zeta1 = AllocateDualArray(p);
    state->zeta2 = AllocateDualArray(p);
    // zeta/zetaPrev point at the two zeta buffers so Iterate() can swap the
    // pointers instead of copying the arrays.
    state->zeta = &state->zeta1;
    state->zetaPrev = &state->zeta2;
    state->theta = 1.0;
    state->s_c = new double[p.factor_count()]();
    state->dual_value = 0;
    state->iteration = 0;
    state->nonsmooth_lower_bound = 0;
    // presence_lists[v] records every (factor_index, scope position) pair in
    // which variable v occurs; used by Project().
    state->presence_lists = new vector<FactorPresenceList>(p.variables);
    for (size_t c = 0; c < p.factors.size(); ++c) {
      const Scope &scope = p.factors[c]->GetScope();
      FactorPresence fp;
      fp.factor_index = c;
      for (size_t i = 0; i < scope.size(); ++i) {
        fp.index = i;
        (*state->presence_lists)[scope[i]].push_back(fp);
      }
    }
    // NOTE: despite the name, this stores the SUM over factors of
    // log(factor dimension); Iterate() uses it when deriving L.
    state->dimension = 0;
    for (size_t c = 0; c < p.factors.size(); ++c) {
      state->dimension += log((double)p.factors[c]->GetDimension());
    }

    return state;
  }

  // Compute a convex combination of x and y and parameter theta and store the
  // result in result. It is ok if result actually points at either x or y.
  void ComputeConvexCombination(const OptimizationProblem &p,
                                const DualArray &x,
                                const DualArray &y,
                                double theta,
                                DualArray *result) {
    for (size_t c = 0; c < p.factor_count(); ++c) {
      const Scope &scope = p.factors[c]->GetScope();
      for (size_t i = 0; i < scope.size(); ++i) {
        for (size_t a = 0; a < p.variable_range[scope[i]]; ++a) {
          (*result)[c][i][a] = (1-theta) * x[c][i][a] +
                               theta * y[c][i][a];
        }
      }
    }
  }

  // Computes p^c_mu(x_i,a) for every factor c (the per-variable marginals of
  // the smoothed max over that factor's assignments) and stores them in
  // state->p. Also updates state->s_c, state->dual_value (their sum), and
  // state->nonsmooth_lower_bound (the sum of unsmoothed per-factor maxima).
  // mu is the smoothing parameter.
  void UpdateP(const OptimizationProblem &p, double mu, AlgorithmState *state) {
    const size_t *variable_range = p.variable_range;

    // Scratch full assignment, value-initialized to all zeros.
    size_t *assignment = new size_t[p.variables]();
    state->dual_value = 0;
    state->nonsmooth_lower_bound = 0;
    for (size_t c = 0; c < p.factor_count(); ++c) {
      // Zero p[c]
      const Factor *factor = p.factors[c];
      const Scope &scope = factor->GetScope();

      for (size_t i = 0; i < scope.size(); i++) {
        memset(state->p[c][i], 0, sizeof(double) * variable_range[scope[i]]);
      }

      double log_first = 0;
      // Constant that will multiply all terms for numerical stability
      for (size_t i = 0; i < scope.size(); ++i) {
        assignment[scope[i]] = 0;
      }

      // First pass: compute max over assignments of
      // (theta_c(x) + nu_c(x)) / mu, used to rescale the exponentials below.
      for (size_t l = 0; l < factor->GetStateSize(); ++l) {
        double pmuc = factor->Evaluate(assignment);
        for (size_t i = 0; i < scope.size(); ++i) {
          pmuc += state->nu[c][i][assignment[scope[i]]];
        }
        if (l==0)
          log_first = pmuc/mu;
        else
          log_first = max(log_first, pmuc/mu);

        IncrementAssignment(assignment, variable_range, scope);
      }
      LOG(INLOOP) << "log_first[" << c << "]=" << log_first << endl;

      double P_total = 0;
      // BUG FIX: a running maximum must start at the most negative double.
      // numeric_limits<double>::min() is the smallest POSITIVE normalized
      // value, which corrupted the bound whenever every pmuc was negative.
      double ns_s_c = -numeric_limits<double>::max();
      // Second pass: the first pass cycled `assignment` back to all zeros,
      // so it can be reused to enumerate the assignments again.
      for (size_t l = 0; l < factor->GetStateSize(); ++l) {
        // Calculate for each assignment of the factor variables the value of
        // p_mu^c (assignment)
        double pmuc = factor->Evaluate(assignment);
        for (size_t i = 0; i < scope.size(); ++i) {
          pmuc += state->nu[c][i][assignment[scope[i]]];
        }
        ns_s_c = dmax(ns_s_c, pmuc);
        LOG(INNERMOST_LOOP) << "log(pmuc["<<c<<","<<l<<"])" << pmuc/mu;

        // Rescale by exp(-log_first) to avoid overflow in exp().
        pmuc = exp(pmuc / mu - log_first);
        LOG(INNERMOST_LOOP) << " pmuc=" << pmuc;
        // We now have that v(assignment)
        //
        P_total += pmuc;
        LOG(INNERMOST_LOOP) << " | P_total = " << P_total << endl;
        // While we iterate over all assignments we want to marginalize
        // for each variable:
        for (size_t i = 0; i < scope.size(); ++i) {
          state->p[c][i][assignment[scope[i]]] += pmuc;
        }
        IncrementAssignment(assignment, variable_range, scope);
      }

      // Normalize p.
      for (size_t i = 0; i < scope.size(); ++i) {
        for (size_t a = 0; a < variable_range[scope[i]]; ++a) {
          state->p[c][i][a] /= P_total;
          LOG(INNERMOST_LOOP) << "P["<<c<<","<<i<<","<<a<<"] = " << state->p[c][i][a] << endl;
        }
      }

      // Calculate s_c as per the formula on the bottom left of page 4 of the
      // paper. Since we have divided every component of P by exp(log_first),
      // we need to remultiply; we do this outside the log for numerical
      // stability.
      state->s_c[c] = mu * (log(P_total / factor->GetDimension()) + log_first);
      state->dual_value += state->s_c[c];
      state->nonsmooth_lower_bound += ns_s_c;
      // And on to the next factor...
    }
    delete[] assignment;
  }

  // Projects into zeta, as in the paper: takes a gradient step from the
  // marginals in state->p and writes into the current zeta buffer. L is the
  // Lipschitz constant; state->theta is the current momentum parameter.
  void Project(const OptimizationProblem &p,
               double L,
               AlgorithmState *state) {
    const size_t *variable_range = p.variable_range;
    for (size_t i = 0; i < p.variables; ++i) {
      const FactorPresenceList &i_factors = (*state->presence_lists)[i];
      for (size_t a = 0; a < variable_range[i]; ++a) {
        // gamma averages the step over every factor containing variable i so
        // the agreement constraint on the duals is preserved.
        double gamma_i_a = 0;
        for (FactorPresenceList::const_iterator fp = i_factors.begin();
             fp != i_factors.end();
             ++fp) {
          gamma_i_a += (state->theta * L *
                        (*state->zetaPrev)[fp->factor_index][fp->index][a] -
                        state->p[fp->factor_index][fp->index][a]);
        }
        LOG(INNERMOST_LOOP) << "gamma("<<i<<","<<a<<") == " << gamma_i_a << endl;
        gamma_i_a /= i_factors.size();
        for (FactorPresenceList::const_iterator fp = i_factors.begin();
             fp != i_factors.end();
             ++fp) {
          (*state->zeta)[fp->factor_index][fp->index][a] -=
              ((state->p[fp->factor_index][fp->index][a] + gamma_i_a) /
               (state->theta * L));
          // BUG FIX: state->zeta is a POINTER to a DualArray and must be
          // dereferenced before indexing. The old log line indexed the
          // pointer itself, reading out of bounds for factor_index > 0 and
          // streaming a raw address instead of the zeta value.
          LOG(INNERMOST_LOOP)
              << "(z,p)"
              << "[" << fp->factor_index << "]"
              << "[" << i << "]"
              << "[" << a << "] = ("
              << (*state->zeta)[fp->factor_index][fp->index][a]
              << ", "
              << state->p[fp->factor_index][fp->index][a]
              << ")" << endl;
        }
      }
    }
  }

  // One iteration of accelerated dual decomposition: derive the smoothing
  // schedule, swap the zeta double-buffer, recompute the marginals (UpdateP),
  // project (Project), blend the new zeta into nu, and advance theta.
  // NOTE: the mu and L arguments are immediately overridden by the epsilon
  // schedule below; the parameters are kept for interface compatibility.
  void Iterate(const OptimizationProblem &p, double mu, double L,
               AlgorithmState *state) {
    LOG(INFO) << "*************** NEW ITERATION *****************" << endl;
    // Target accuracy decays like 1/sqrt(iteration) once sqrt(iteration)
    // exceeds 100; before that the denominator is clamped at 100.
    double epsilon = 100000000.0 / (dmax(sqrt(state->iteration), 100));
    L = 2 * state->dimension / epsilon;
    mu = 1 / L;
    cerr << "Epsilon = " << epsilon << endl;

    /*ComputeConvexCombination(p,
                             state->nu,
                             state->zeta,
                             state->theta,
                             &state->eta);*/
    // Swap the zeta double-buffer: last iteration's write target becomes
    // zetaPrev and the other buffer becomes the new write target.
    // BUG FIX: both branches previously selected zeta2/zeta1, so the buffers
    // never actually alternated after the first iteration.
    if (state->zeta == &state->zeta1) {
      state->zeta = &state->zeta2;
      state->zetaPrev = &state->zeta1;
    } else {
      state->zeta = &state->zeta1;
      state->zetaPrev = &state->zeta2;
    }

    UpdateP(p, mu, state);
    Project(p, L, state);
    // nu <- (1-theta)*nu + theta*zeta.
    ComputeConvexCombination(p,
                             state->nu,
                             (*state->zeta),
                             state->theta,
                             &state->nu);
    // Momentum update: new theta solves theta^2 = (1 - theta) * theta_old^2.
    double t2 = state->theta * state->theta;
    double t4 = t2 * t2;
    state->theta = (sqrt(t4 + 4*t2) - t2) / 2;
    ++state->iteration;
  }
}
