/*
 * anls_decentralized.cc
 *
 * Copyright 2013 Long Tran (ltran3@gatech.edu)
 *
 * Decentralized ALS, solving saddle point of the Lagrangian
 *   L = sum |A_i - W_iH_i^T|^2 + <Lambda_i, W_i-U_i> + <Pi_i, H_i-V_i>
 *        + 0.5*rho*(|W_i-U_i|^2+|H_i-V_i|^2)
 *        + sum_E <Delta_ij, V_i-V_j> + 0.5*rho*sum_E |V_i-V_j|^2
 *   subject to U_i >= 0, V_i >= 0
 *   where ij \in E means there is an edge from node i to node j
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 * 
 */

#include <armadillo>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/mpi.hpp>

#include "helper/helper.h"
#include "helper/StopWatch.h"
#include "helper/FileAccess.h"
extern boost::mpi::communicator* p_world;
extern po::variables_map vm;
#include "helper/mpi_helper.h"
#include "graph_helper.h"
#include "helper/arma_helper.h"

#define MSG_TAG_A 0
#define MSG_TAG_V 1
#define MSG_TAG_D 2

using namespace arma;

class DecentralizedLessANLS {
public:
  mat A, W, H;              // local data block and its two factor matrices
  // per-iteration progress (residual + timings); filled on rank 0 only
  std::vector<Iteration> iterInfo;
protected:
  int m, n, k;              // A is m x n; factorization rank is k
  double rho, alpha;        // ADMM penalty (rho) and dual-ascent step (alpha)
  double rho_incr, rho_decr; // multiplicative factors for adaptive rho/alpha
  double l1, l2;            // l1/l2 regularization weights applied to V
  int maxiter, interval;    // iteration budget and logging interval
  bool normalizing;         // if set, normalize V's columns instead of thresholding
  std::vector<int> neighbors; // MPI ranks adjacent to this node in the graph
  int n_neighbor;
  GraphHelper graph;
  mat U, V, Lambda, Pi, U_old, V_old; // auxiliary vars and Lagrange multipliers
  std::vector<mat> V_nb, Delta;       // neighbors' H copies and edge multipliers

  StopWatch watch, clock;
  TimeDuration runtime[2], total_time[2]; // [0]=compute, [1]=communication

public:
  // Reads solver parameters from the program-options map and builds the
  // communication graph for this rank.
  DecentralizedLessANLS()
  {
    PO_REQUIRE(vm, "iter");
    PO_REQUIRE(vm, "graph");

    PO_VARIABLE_VALUE(rho, vm, "rho", double); // ADMM parameter
    PO_VARIABLE_VALUE(rho_incr, vm, "rho-incr", double); // ADMM parameter
    PO_VARIABLE_VALUE(rho_decr, vm, "rho-decr", double); // ADMM parameter
    PO_VARIABLE_VALUE(alpha, vm, "alpha", double); // dual ascent step
    l1 = (vm.count("l1")) ? vm["l1"].as<double>() : 0; // l1 regularization
    l2 = (vm.count("l2")) ? vm["l2"].as<double>() : 0; // l2 regularization
    PO_VARIABLE_VALUE(maxiter, vm, "iter", int);
    PO_VARIABLE_VALUE(interval, vm, "interval", int);
    normalizing = vm.count("normalize");

    graph.BuildGraph(neighbors);
    n_neighbor = SZ(neighbors);
  }

  // Loads this rank's data block "<data>_<rank>" (Armadillo binary format)
  // and records its dimensions.
  void LoadData()
  {
    PO_REQUIRE(vm, "data");
    PO_REQUIRE(vm, "k");
    PO_VARIABLE(data, vm, "data", std::string);
    PO_VARIABLE_VALUE(k, vm, "k", int);

    std::stringstream ss;
    ss << data << "_" << MPI_RANK;
    A.load(ss.str(), arma_binary);
    m = A.n_rows;
    n = A.n_cols;
  }

  // W-step: unconstrained least squares for
  //   min |A - W H^T|^2 + <Lambda, W-U> + 0.5*rho*|W-U|^2
  // i.e. W = (A H - Lambda + rho U)(H^T H + rho I)^{-1}.
  void UpdateW()
  {
    W = trans(solve(trans(H)*H+rho*eye(k,k), trans(A*H-Lambda+rho*U)));
  }

  // H-step: least squares including the consensus terms with the
  // neighbors' H copies; only rank 0 carries the (Pi, V) coupling.
  void UpdateH()
  {
    // BUG FIX: '?:' binds looser than '+', so the original
    //   alpha*n_neighbor + (MPI_RANK==0) ? rho : 0
    // parsed as (alpha*n_neighbor + (MPI_RANK==0)) ? rho : 0, which made
    // s == rho whenever the sum was nonzero. Parenthesize the conditional.
    double s = alpha*n_neighbor + ((MPI_RANK==0) ? rho : 0);
    mat AA = s*eye(k,k) + trans(W)*W;
    mat BB = trans(A)*W;
    if (MPI_RANK == 0) BB += (rho*V - Pi);
    // Edge multipliers enter with opposite signs on the two sides of an edge.
    double coeff = graph.IsLeftSide()? 1 : -1;
    for (int i = 0; i < n_neighbor; i++)
      BB += alpha*V_nb[i] - coeff*Delta[i];

    H = trans(solve(AA, trans(BB)));
    MSG(trace(AA));
  }

  // U-step: projection of W + Lambda/rho onto the nonnegative orthant.
  void UpdateU()
  {
    U = W + Lambda/rho;
    zero_threshold(U);
  }

  // V-step (rank 0 only): proximal update with l1/l2 regularization,
  // followed by either column normalization or nonnegative projection.
  void UpdateV1()
  {
    if (MPI_RANK == 0) {
      V = rho*H + Pi - l1*ones<mat>(n,k);
      double gamma = l2 + rho;
      V /= gamma;
      if (normalizing)
        normalize_column_equal_1(V);
      else
        zero_threshold(V);
    }
  }

  // Exchanges H with graph neighbors. "Left" nodes compute first then
  // send/receive; "right" nodes receive first, compute, then send, so the
  // pairwise exchange cannot deadlock.
  void SendReceiveH()
  {
    // std::vector instead of a variable-length array: VLAs are a GCC
    // extension, not ISO C++.
    std::vector<mpi::request> reqs(n_neighbor);
    if (graph.IsLeftSide()) { // left
      // compute V
      UpdateH();

      // send H to neighbors (to right)
      for (int j = 0; j < n_neighbor; j++)
        MPI_ISEND_VERBOSE(reqs[j], neighbors[j], MSG_TAG_V, H, vm, "H["<<MPI_RANK<<"]");
      mpi::wait_all(reqs.begin(), reqs.end());

      // receive H_neighbors (from right)
      for (int j = 0; j < n_neighbor; j++)
        MPI_IRECV_VERBOSE(reqs[j], neighbors[j], MSG_TAG_V, V_nb[j], vm, "H["<<neighbors[j]<<"]");
      mpi::wait_all(reqs.begin(), reqs.end());
    }
    else { // right
      // receive H_neighbors (from left)
      for (int i = 0; i < n_neighbor; i++)
        MPI_IRECV_VERBOSE(reqs[i], neighbors[i], MSG_TAG_V, V_nb[i], vm, "H["<<neighbors[i]<<"]");
      mpi::wait_all(reqs.begin(), reqs.end());
      MSG_VERBOSE(vm, MPI_RANK << " Received V");

      // compute V
      UpdateH();

      // send V to neighbors (to left)
      for (int i = 0; i < n_neighbor; i++)
        MPI_ISEND_VERBOSE(reqs[i], neighbors[i], MSG_TAG_V, H, vm, "V["<<MPI_RANK<<"]");
      mpi::wait_all(reqs.begin(), reqs.end());
    }
  }

  // Dual ascent on all multipliers: Lambda for W=U, Pi for H=V (rank 0),
  // and Delta for the edge consensus constraints, signed per edge side.
  void UpdateLambdaPiDelta()
  {
    //double rate = /*alpha/sqrt(iter+1)**/ rho;
    Lambda += rho*(W-U);
    if (MPI_RANK == 0) Pi += rho*(H-V);
    if (!graph.IsLeftSide()) {
      for (int i = 0; i < n_neighbor; i++) Delta[i] += alpha*(V_nb[i]-V);
    }
    else {
      for (int j = 0; j < n_neighbor; j++) Delta[j] += alpha*(V-V_nb[j]);
    }
  }

  // Reduces the squared local residuals |A_i - W_i H_i^T|^2 to rank 0,
  // which records and (every `interval` iterations) logs the progress.
  void RecordProgress(int iter)
  {
    double res = norm(A-W*trans(H), "fro"), sum_res;
    res *= res;
    mpi::reduce(*p_world, res, sum_res, std::plus<double>(), 0);
    if (MPI_RANK == 0) {
      Iteration info(iter, sqrt(sum_res), runtime[0]+runtime[1], runtime[0], runtime[1]);
      iterInfo.push_back(info);
      if (iter % interval == 0)
        MSG("it=" << iter << " |res|=" << sqrt(sum_res)  << " |H|=" << norm(H, "fro")
            << " d=" << SECONDS(info.duration)
            << " alpha=" << alpha << " rho=" << rho);
    }
  }

  // Adaptive rho/alpha based on primal/dual residual balance (currently
  // disabled in SolveANLS). NOTE(review): relies on U_old/V_old, which are
  // never assigned anywhere visible in this file — confirm they are set
  // before re-enabling this update.
  void UpdateRhoAlpha(int iter)
  {
    if (iter < 1) return;
    double w_norm = norm(W-U,"fro"), h_norm = norm(H-V, "fro"), sum_w, sum_h;
    w_norm *= w_norm;
    h_norm *= h_norm;
    double v_norm = 0, sum_v;
    for (int i = 0; i < n_neighbor; i++) {
      double tmp = norm(V-V_nb[i], "fro");
      v_norm += tmp*tmp;
    }
    v_norm /= 2; // each edge is counted from both ends

    mpi::reduce(*p_world, w_norm, sum_w, std::plus<double>(), 0);
    mpi::reduce(*p_world, h_norm, sum_h, std::plus<double>(), 0);
    mpi::reduce(*p_world, v_norm, sum_v, std::plus<double>(), 0);

    double u_norm = norm(U-U_old, "fro"), sum_u;
    double va_norm = norm(V-V_old, "fro"), sum_va;
    double vr_norm = graph.IsLeftSide() ? 0 : va_norm, sum_vr;
    u_norm *= rho; u_norm *= u_norm;
    va_norm *= rho; va_norm *= va_norm;
    vr_norm *= (alpha); vr_norm *= vr_norm; vr_norm *= (double)n_neighbor/2;

    mpi::reduce(*p_world, u_norm, sum_u, std::plus<double>(), 0);
    mpi::reduce(*p_world, va_norm, sum_va, std::plus<double>(), 0);
    mpi::reduce(*p_world, vr_norm, sum_vr, std::plus<double>(), 0);

    if (MPI_RANK == 0) {
      double primal1 = sum_w + sum_h;
      double primal2 = sum_v;
      double primal = primal1+primal2;

      double dual1 = sum_u + sum_va;
      double dual2 = sum_vr;
      double dual = dual1+dual2;

      if (iter % interval == 0) MSG("p1=" << primal1 << " d1=" << dual1 << " p2=" << primal2 << " d2=" << dual2);

      // Standard ADMM heuristic: grow the penalty when the primal residual
      // dominates, shrink it when the dual residual dominates.
      if (primal2 > 100*dual2) alpha *= rho_incr;
      else if (dual2 > 100*primal2) alpha *= rho_decr;

      if (primal1 > 100*dual1) rho *= rho_incr;
      else if (dual1 > 100*primal1) rho *= rho_decr;

      //if (primal > 1000*dual) alpha *= rho_incr;
      //else if (dual > 1000*primal) alpha *= rho_decr;

      /*
      if (primal > 100*dual) {
        rho *= rho_incr;
        alpha *= rho_incr;
      }
      else if (dual > 100*primal) {
        rho *= rho_decr;
        alpha *= rho_decr;
      }
      */

      // Clamp both parameters to a sane range.
      if (rho > 1e3) rho = 1e3;
      if (rho < 1e-2) rho = 1e-2;
      if (alpha > 1e3) alpha = 1e3;
      if (alpha < 1e-2) alpha = 1e-2;
    }
    // All ranks must agree on the (possibly adapted) parameters.
    mpi::broadcast(*p_world, rho, 0);
    mpi::broadcast(*p_world, alpha, 0);
  }

  // Random initialization of the factors and auxiliary variables;
  // multipliers start at zero.
  void Initialize()
  {
    W = randu<mat>(m, k);
    H = randu<mat>(n, k);

    U = randu<mat>(m, k);
    V = randu<mat>(n, k);
    Lambda = zeros<mat>(m, k);
    Pi = zeros<mat>(n, k);
    V_nb = std::vector<mat>(n_neighbor, zeros<mat>(n, k));
    Delta = std::vector<mat>(n_neighbor, zeros<mat>(n, k));
  }

  // Main ADMM loop: W-step, H exchange+step, projections, dual updates.
  // On exit the nonnegative auxiliaries (U, V) become the reported factors.
  void SolveANLS()
  {
    Initialize();

    watch.Reset();
    clock.Reset();
    iterInfo.clear();
    for (int iter=0; iter < maxiter; iter++) {
      RecordProgress(iter);

      UpdateW();
      SendReceiveH();
      UpdateU();
      UpdateV1();
      UpdateLambdaPiDelta();

      //UpdateRhoAlpha(iter);
    }
    RecordProgress(maxiter);
    if (MPI_RANK == 0) MSG(watch.GetTotalDuration());
    mpi::reduce(*p_world, runtime[0], total_time[0], std::plus<TimeDuration>(), 0);
    mpi::reduce(*p_world, runtime[1], total_time[1], std::plus<TimeDuration>(), 0);
    if (MPI_RANK == 0)
      for (int i = 0; i < 2; i++) MSG("clock " << i << " " << total_time[i]);
    W = U;
    H = V;
  }
};


void run_dist_anls_less()
{
  if (MPI_RANK == 0) MSG("Decentralized ANLS Less");
  DecentralizedLessANLS a;
  a.LoadData();
  a.SolveANLS();

  if (vm.count("output") && vm.count("graph")) {
    PO_VARIABLE(output, vm, "output", std::string);
    PO_VARIABLE(graph, vm, "graph", std::string);
    
    std::stringstream ss;
    ss << output << "_" << graph;
    if (graph.compare("equal") == 0) {
      PO_VARIABLE(degree, vm, "degree", int);
      ss << "_" << degree;
    }
    ss << "_" << MPI_SIZE << "_" << MPI_RANK << ".raw";
    a.H.save(ss.str(), raw_ascii);
  }

  if (MPI_RANK == 0 && vm.count("iterinfo")) {
    PO_VARIABLE(infoFile, vm, "iterinfo", std::string);
    FileAccessWrite(out, infoFile, NULL);
    const std::vector<Iteration>& info =  a.iterInfo;
    for (unsigned int i = 0; i < info.size(); i++) {
      out << info[i].iter << "\t" << SECONDS(info[i].duration) << "\t" 
	  << SECONDS(info[i].compute) << "\t" 
	  << SECONDS(info[i].communication) << "\t"
	  << info[i].residual << std::endl;
    }
  }

}

