// Accelerated Dual Decomposition - An implementation of a MAP inference
// algorithm. See ai.stanford.edu/~vjojic/fastdd.pdf
//
// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Authors: nadavs@google.com (Nadav Samet)
//          noam.levy1@stanford.edu (Noam Levy)
//
// A wrapper around the AcceleratedDD interface for integration with the
// MRF framework.

#include "AcceleratedDD_MRF.h"

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

using namespace std;

// Non-grid constructor: an MRF over `variables` nodes, each taking one of
// `nLabels` labels. `epsilon` is stored and later used by initializeAlg()
// to derive the constants mu and L. `logfile`/`loglevel` configure the
// solver's logging (see CommonConstructor).
AcceleratedDD_MRF::AcceleratedDD_MRF(
    int variables, int nLabels,
    EnergyFunction *eng,
    double epsilon,
    const string &logfile,
    AcceleratedDD::LOGLEVEL loglevel) :
        MRF(variables, nLabels, eng), epsilon_(epsilon) {
  CommonConstructor(variables, nLabels, logfile, loglevel);
}

// Grid constructor: a width x height pixel grid, flattened to
// width * height variables for the underlying problem representation.
AcceleratedDD_MRF::AcceleratedDD_MRF(
    int width, int height, int nLabels,
    EnergyFunction *eng,
    double epsilon,
    const string &logfile,
    AcceleratedDD::LOGLEVEL loglevel) :
        MRF(width, height, nLabels, eng), epsilon_(epsilon) {
  CommonConstructor(width * height, nLabels, logfile, loglevel);
}


// Shared constructor body: allocates the per-variable label-range array and
// the answer buffer, then wires up the solver's log stream.
//
// "stdout"/"stderr" map to the standard streams; any other non-empty name
// opens that file for writing. An empty logfile leaves logging unconfigured.
void AcceleratedDD_MRF::CommonConstructor(
    int variables, int nLabels,
    const string &logfile,
    AcceleratedDD::LOGLEVEL loglevel) {
  problem_.variables = variables;
  // Every variable has the same label range in this wrapper.
  problem_.variable_range = new size_t[variables]();
  for (size_t i = 0; i < problem_.variables; ++i) {
    problem_.variable_range[i] = nLabels;
  }
  // Value-initialized to all-zero labels.
  answer_ = new Label[problem_.variables]();

  if (logfile.empty()) return;
  ostream *logstream = NULL;
  if (logfile == "stdout") {
    logstream = &cout;
  } else if (logfile == "stderr") {
    logstream = &cerr;
  } else {
    // NOTE(review): this ofstream is never deleted; it apparently lives for
    // the process lifetime since AcceleratedDD keeps the pointer. Confirm
    // against AcceleratedDD::SetLogStream's ownership contract.
    logstream = new ofstream(logfile.c_str(), ios_base::out);
    if (!logstream->good()) {
      cerr << "Fatal error: Unable to open log stream." << endl;
      exit(1);
    }
  }
  AcceleratedDD::SetLogStream(logstream);
  AcceleratedDD::SetLogLevel(loglevel);
}

// Releases the solver state, every factor owned by the problem, and the
// arrays allocated in CommonConstructor.
AcceleratedDD_MRF::~AcceleratedDD_MRF() {
  // Tear down the algorithm state before destroying the factors it
  // references.
  AcceleratedDD::DeleteAlgorithmState(problem_, state_);
  const size_t factor_count = problem_.factors.size();
  for (size_t idx = 0; idx != factor_count; ++idx) {
    delete problem_.factors[idx];
    problem_.factors[idx] = NULL;
  }
  delete[] problem_.variable_range;
  delete[] answer_;
}

// Returns the data (unary) part of the energy of the current labeling.
// Factors store negated costs (see setData), so the sum is negated back.
MRF::EnergyVal AcceleratedDD_MRF::dataEnergy() {
  return -sumFactors(dataFactors_);
}

// Returns the smoothness (pairwise) part of the energy of the current
// labeling. Pairwise factors store negated costs (see CreatePairwiseFactor),
// so the sum is negated back.
MRF::EnergyVal AcceleratedDD_MRF::smoothnessEnergy() {
  return -sumFactors(pairwiseFactors_);
}

// Evaluates the current labeling (answer_) under the given factors and
// returns the summed factor values.
MRF::EnergyVal AcceleratedDD_MRF::sumFactors(
    const vector<AcceleratedDD::Factor*> &factors) const {
  // Copy the labels into a size_t assignment, the index type that
  // Factor::Evaluate expects. A std::vector replaces the original raw
  // new[]/delete[] pair, so the buffer cannot leak if Evaluate throws.
  vector<size_t> assignment(problem_.variables);
  for (size_t var = 0; var < problem_.variables; ++var) {
    assignment[var] = answer_[var];
  }
  double result = 0;
  for (size_t i = 0; i < factors.size(); ++i) {
    result += factors[i]->Evaluate(&assignment[0]);
  }
  return result;
}

void AcceleratedDD_MRF::CheckGrid() {
  if (!m_grid_graph) {
    throw new runtime_error(
        "Grid graph expected");
  }
}

void AcceleratedDD_MRF::CheckNonGrid() {
  if (m_grid_graph) {
    throw new runtime_error(
        "Expectedd non-grid graph");
  }
}

// unimplemented
void AcceleratedDD_MRF::setNeighbors(int pix1, int pix2, CostVal weight) {
  CheckNonGrid();
  if (weight != 1.0) {
    throw new runtime_error(
        "Weight must be equal 1.");
  }
  edges_.push_back(make_pair(pix1, pix2));
}

// Runs nIterations iterations of the accelerated dual-decomposition
// algorithm. After each iteration a primal labeling is decoded into
// answer_ by taking, per variable, the label with the largest value in
// the singleton factor's distribution state_->p[var][0], and the primal
// energy plus the current dual value are printed.
void AcceleratedDD_MRF::optimizeAlg(int nIterations) {
  for (int i = 0; i < nIterations; ++i) {
    AcceleratedDD::Iterate(problem_, mu, L, state_);
    //cout << "Assignment: ";
    for (size_t var = 0; var < problem_.variables; ++var) {
      // NOTE: We assume that we added the singleton factors first so p[i] corresponds
      // to the singleton factor of variable i.
      // Argmax over the variable's label distribution.
      int max_a = 0;
      for (size_t a = 1; a < problem_.variable_range[var]; ++a) {
        if (state_->p[var][0][a] > state_->p[var][0][max_a]) {
          max_a = a;
        }
      }
      // cout << "answer[" << var << "] = "<< max_a << endl;
      answer_[var] = max_a;
      //cout << answer_[var] << " ";
    }
    cout << endl;
    // Primal energy of the decoded labeling vs. the dual bound.
    cout << "Energy: " << totalEnergy() << "  Dual energy " << state_->dual_value << endl;
  }
}

// Returns pointer to array of size nPixels. Client may then read/write
// solution (but not deallocate array); the array is owned by this object
// and freed in the destructor.
MRF::Label* AcceleratedDD_MRF::getAnswerPtr() {
	return answer_;
}

// Returns the current label of the given pixel (variable).
MRF::Label AcceleratedDD_MRF::getLabel(int pixel) {
  return (MRF::Label)answer_[pixel];
}

// Sets the label of a single pixel (variable) in the answer array.
void AcceleratedDD_MRF::setLabel(int pixel,Label label) {
  answer_[pixel] = label;
}

// sets all the labels to zero
void AcceleratedDD_MRF::clearAnswer() {
  memset(answer_, 0, problem_.variables * sizeof(size_t));
  throw new runtime_error("Not implemented");
}

// unimplemented
void AcceleratedDD_MRF::setParameters(int numParam, void *param) {
  throw new runtime_error("Not implemented");
}

// Builds one singleton (unary) factor per variable from the data-cost
// callback. Costs are stored negated; dataEnergy() negates the sum back.
// NOTE: singleton factors must be added before pairwise ones -- optimizeAlg
// relies on state_->p[var][0] being variable var's singleton factor.
void AcceleratedDD_MRF::setData(DataCostFn dcost) {
  // (An unused local `size_t p1;`, shadowed by the loop variable below,
  // was removed here.)
  AcceleratedDD::Scope scope;
  for (size_t p1 = 0; p1 < problem_.variables; ++p1) {
    scope.clear();
    scope.push_back(p1);
    AcceleratedDD::TableFactor *factor = new AcceleratedDD::TableFactor(
        scope, &problem_);
    size_t a[1];
    for (a[0] = 0; a[0] < (size_t)m_nLabels; ++a[0]) {
      factor->Assign(a, -dcost(p1, a[0]));
    }
    problem_.factors.push_back(factor);
    dataFactors_.push_back(factor);
  }
}

// Unimplemented: only the callback-based setData(DataCostFn) overload is
// supported.
void AcceleratedDD_MRF::setData(CostVal* data) {
  // Throw by value (not `throw new`) so callers can catch
  // `const std::runtime_error&`.
  throw runtime_error("Not implemented");
}

// Unimplemented: only the callback-based setSmoothness(SmoothCostGeneralFn)
// overload is supported.
void AcceleratedDD_MRF::setSmoothness(CostVal* V) {
  // Throw by value (not `throw new`) so callers can catch
  // `const std::runtime_error&`.
  throw runtime_error("Not implemented");
}

// Builds a two-variable table factor whose entries are the negated
// smoothness costs for every (label1, label2) combination of var1/var2.
// The caller takes ownership of the returned factor.
inline AcceleratedDD::Factor *CreatePairwiseFactor(
    MRF::SmoothCostGeneralFn cost,
    AcceleratedDD::VariableId var1,
    AcceleratedDD::VariableId var2,
    AcceleratedDD::OptimizationProblem *problem) {
  AcceleratedDD::Scope scope;
  scope.push_back(var1);
  scope.push_back(var2);
  AcceleratedDD::TableFactor *table = new AcceleratedDD::TableFactor(
      scope, problem);
  const size_t range1 = problem->variable_range[var1];
  const size_t range2 = problem->variable_range[var2];
  size_t labels[2];
  for (labels[0] = 0; labels[0] < range1; ++labels[0]) {
    for (labels[1] = 0; labels[1] < range2; ++labels[1]) {
      // Costs are stored negated (the energy accessors negate back).
      table->Assign(labels, -cost(var1, var2, labels[0], labels[1]));
    }
  }
  return table;
}

// Utility that creates one pairwise factor per neighboring pixel pair on
// the grid. x_max and y_max define the boundaries of the scan; p2_offset
// is the distance between the two pixels in the flat array (1 for a
// horizontal pair, m_width for a vertical pair).
void AcceleratedDD_MRF::InnerAddGridPairwiseFactors(
    MRF::SmoothCostGeneralFn cost,
    int width,
    int x_max,
    int y_max,
    int p2_offset) {
  CheckGrid();
  for (int row = 0; row < y_max; ++row) {
    const int row_base = row * width;
    for (int col = 0; col < x_max; ++col) {
      const int pixel = row_base + col;
      AcceleratedDD::Factor *factor = CreatePairwiseFactor(
          cost, pixel, pixel + p2_offset, &problem_);
      problem_.factors.push_back(factor);
      pairwiseFactors_.push_back(factor);
    }
  }
}

// Creates one pairwise factor for every edge previously registered via
// setNeighbors (non-grid graphs only).
void AcceleratedDD_MRF::InnerAddEdgesFactors(MRF::SmoothCostGeneralFn cost) {
  CheckNonGrid();
  const size_t edge_count = edges_.size();
  for (size_t edge = 0; edge != edge_count; ++edge) {
    AcceleratedDD::Factor *factor = CreatePairwiseFactor(
        cost, edges_[edge].first, edges_[edge].second, &problem_);
    problem_.factors.push_back(factor);
    pairwiseFactors_.push_back(factor);
  }
}

// Installs pairwise smoothness factors. For a grid graph, factors are
// added between every horizontally and vertically adjacent pixel pair;
// otherwise factors are built from the edges collected by setNeighbors.
void AcceleratedDD_MRF::setSmoothness(MRF::SmoothCostGeneralFn cost) {
  if (m_grid_graph) {
    // Horizontal pairs: x in [0, m_width-1), partner offset 1.
    InnerAddGridPairwiseFactors(cost, m_width,
                            m_width-1, m_height, 1);
    // Vertical pairs: y in [0, m_height-1), partner offset m_width.
    InnerAddGridPairwiseFactors(cost, m_width,
                            m_width, m_height-1, m_width);
  } else {
    InnerAddEdgesFactors(cost);
  }
}

void AcceleratedDD_MRF::setSmoothness(int smoothExp,CostVal smoothMax, CostVal lambda) {
  throw new runtime_error("Not implemented");
}

// Unimplemented: spatially varying cue weights are not supported.
void AcceleratedDD_MRF::setCues(CostVal* hCue, CostVal* vCue) {
  // Throw by value (not `throw new`) so callers can catch
  // `const std::runtime_error&`.
  throw runtime_error("Not implemented");
}

// Creates the solver's internal state and derives the constants used by
// Iterate(): L = (2 / epsilon_) * sum over factors of log(dimension),
// and mu = 1 / L.
void AcceleratedDD_MRF::initializeAlg() {
  state_ = AcceleratedDD::CreateInitialAlgorithmState(problem_);
  double log_dim_sum = 0;
  for (size_t c = 0; c < problem_.factors.size(); ++c) {
    log_dim_sum += log((double)problem_.factors[c]->GetDimension());
  }
  L = 2 * log_dim_sum / epsilon_;
  mu = 1 / L;

  cout << "Initializing algorithm with mu="<< mu << ", L="<<L << endl;
}


// Returns a lower bound on the minimum energy: the negation of the dual
// value maintained by the solver (the solver's internal objective is the
// negated energy; see the negated costs in setData/CreatePairwiseFactor).
double AcceleratedDD_MRF::lowerBound()
{
  return -state_->dual_value;
}
