//      nmf_master_slave.cc
//      Master/Slave Consensus NMF (solve an augmented Lagrangian optimization)
//        -- use Boost.MPI
//      
//      Copyright 2012 tqlong <tqlong@espada>
//      
//      This program is free software; you can redistribute it and/or modify
//      it under the terms of the GNU General Public License as published by
//      the Free Software Foundation; either version 2 of the License, or
//      (at your option) any later version.
//      
//      This program is distributed in the hope that it will be useful,
//      but WITHOUT ANY WARRANTY; without even the implied warranty of
//      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//      GNU General Public License for more details.
//      
//      You should have received a copy of the GNU General Public License
//      along with this program; if not, write to the Free Software
//      Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
//      MA 02110-1301, USA.

#include "helper/stl_helper.h"
#include "helper/arma_helper.h"
#include "helper/mpi_helper.h"

#include "nmf.h"

extern po::variables_map vm;
extern mpi::communicator* p_world;

// Seed the C PRNG differently on every MPI rank.
//
// The previous scheme squared the time-based seed once per rank
// (s *= s).  Under 64-bit modular arithmetic this collapses: an even
// timestamp loses doubling powers of two on every squaring, so every
// rank >= 6 ends up with seed 0 -- i.e. identical random streams on
// most workers.  Mixing the rank in with a large odd constant (Knuth's
// multiplicative-hash constant) keeps each rank's seed distinct.
void reset_seed()
{
  ulong s = static_cast<ulong>(time(NULL));
  s += static_cast<ulong>(p_world->rank()) * 2654435761UL;
  MSG("seed=" << s);
  std::srand(s);
}

void assign_rows(int M, std::vector<int>& begin, std::vector<int>& end_exclusive)
{
  FileAccessRead(in, VM(vm, "input", std::string));
  line head;
  if (vm.count("binary")) head.read(in);
  else { in >> head.i_ >> head.j_ >> head.s_; }

  int m = head.i_, m_i = m / M;
  for (int i = 0; i < M; i++) {
    begin[i] = i*m_i;
    end_exclusive[i] = i < M-1 ? (i+1)*m_i : m;
  }
}

// Load this worker's slice of the sparse input matrix -- rows
// [begin, end_exclusive) -- into A, honoring the "binary" flag.
void read_data(matrix_t& A, int begin, int end_exclusive) {
  const std::string path = VM(vm, "input", std::string);
  const bool is_binary = vm.count("binary") != 0;
  read_sparse_matrix_subrows(path, A, is_binary, begin, end_exclusive);
}

// Distributed consensus NMF solved with an ADMM-style augmented
// Lagrangian.  Every MPI rank is a worker owning a horizontal slice A_i
// of the input; rank 0 additionally acts as master.  Each worker
// factorizes A_i ~ W_i * H_i subject to the consensus constraint
// H_i = Z, with dual variables Y_i and penalty rho.  The master
// averages the workers' H_i into the global Z and owns the log file.
//
// NOTE: every broadcast/reduce below is a collective -- all ranks must
// execute them in the same order, which is why master-only work is
// limited to the bodies of the IS_MASTER_PROCESS branches.
void master_slave_nmf()
{
  RequiredParameter(vm, "ms")("input")("output")("log")("k")("alpha")("iter")("rho");
  reset_seed();

  int M = p_world->size(); // number of workers
  std::vector<int> begin(M), end_exclusive(M); // rows assigned to each worker

  // Master computes the row partition from the file header; everyone
  // else receives it, so only rank 0 touches the header for this step.
  if (IS_MASTER_PROCESS)
    assign_rows(M, begin, end_exclusive);
  broadcast(*p_world, begin, 0);
  broadcast(*p_world, end_exclusive, 0);

  MSG("rank= "<< p_world->rank() << " begin=" << begin[p_world->rank()] 
      << " end=" << end_exclusive[p_world->rank()]);

  // initialization at each worker
  matrix_t A_i, Y_i, W_i, H_i, Z;
  read_data(A_i, begin[p_world->rank()], end_exclusive[p_world->rank()]);

  int m_i = A_i.n_rows, n = A_i.n_cols, k = VM(vm, "k", int);

  // One shared random consensus matrix Z, generated on the master and
  // broadcast so all ranks start from the same global iterate.
  if (IS_MASTER_PROCESS)
    gen_sparse_matrix<matrix_t>(Z, k, n, 0.5);
  broadcast(*p_world, Z, 0);

  // Local factors are initialized randomly per worker (seeds differ by
  // rank via reset_seed); duals Y_i start at zero.
  gen_sparse_matrix<matrix_t>(W_i, m_i, k, 0.5);
  gen_sparse_matrix<matrix_t>(H_i, k, n, 0.5);
  //H_i = Z;
  Y_i.zeros(k, n);

  // read parameter from command line
  int maxiter = VM(vm, "iter", int);
  double alpha = VM(vm, "alpha", double);
  double rho = VM(vm, "rho", double);
  std::string inFile = VM(vm, "input", std::string);
  std::string outFile = VM(vm, "output", std::string);
  std::string logFile = VM(vm, "log", std::string);

  // prepare temporaries (allocated once, reused by the BCD solvers
  // every iteration to avoid per-iteration allocation)
  matrix_t A_H(A_i.n_rows, H_i.n_rows);
  matrix_t A_W(A_i.n_cols, W_i.n_cols);
  matrix_t W_T_W(W_i.n_cols, W_i.n_cols);
  matrix_t H_T_H(H_i.n_rows, H_i.n_rows);

  // Stacked systems for the H update: minimizing
  //   ||A_i - W_i H||^2 + rho ||H - (Z - Y_i/rho)||^2
  // is the same least-squares NMF problem with the augmented data
  //   A_i_H = [A_i; sqrt(rho)(Z - Y_i/rho)],  W_i_H = [W_i; sqrt(rho) I].
  // The top blocks are filled here; the rho-dependent bottom blocks are
  // refreshed inside the loop as rho changes.
  matrix_t A_i_H(A_i.n_rows + k, A_i.n_cols);
  matrix_t W_i_H(W_i.n_rows + k, W_i.n_cols);
  A_i_H( arma::span(0, A_i.n_rows-1), arma::span::all ) = A_i;
  matrix_t Wnew = W_i, Hnew = H_i;

  // main loop
  if (IS_MASTER_PROCESS) FileAccessCreate(logFile);
  // Geometric schedule: rho grows by a factor of 400 over the whole run
  // (rho_multiplier^maxiter == 400), tightening consensus over time.
  double rho_multiplier = exp(log(400)/maxiter);
  for (int iter = 0; iter < maxiter; iter++) {

    // each worker run a certain number of iterations to optimize W_i and H_i
    for (int node_iter = 0; node_iter < 5; node_iter++) {
      W_i = nmf_bcd_W<matrix_t>(A_i, W_i, H_i, alpha, A_H, H_T_H, Wnew);

      // Refresh the rho-dependent bottom blocks of the stacked system
      // (W_i also changed just above), then solve for H_i.
      A_i_H( arma::span(A_i.n_rows, A_i_H.n_rows-1), arma::span::all) = sqrt(rho)*(Z - Y_i/rho);
      W_i_H( arma::span(0, W_i.n_rows-1), arma::span::all) = W_i;
      W_i_H( arma::span(W_i.n_rows, W_i_H.n_rows-1), arma::span::all) = sqrt(rho)*arma::eye(k, k);
      
      H_i = nmf_bcd_H<matrix_t>(A_i_H, W_i_H, H_i, alpha, A_W, W_T_W, Hnew);
    }
    
    // Compute average Z = ave(H_i + Y_i / rho) at master node, then broadcast Z
    matrix_t MSG_TO_MASTER = H_i + Y_i / rho;
    matrix_t TOTAL_MSG;
    reduce(*p_world, MSG_TO_MASTER, TOTAL_MSG, std::plus<matrix_t>(), 0);
    if (IS_MASTER_PROCESS) Z = TOTAL_MSG / M;
    broadcast(*p_world, Z, 0);

    // Update Multipliers (Ascend) at worker: standard ADMM dual ascent
    // on the residual H_i - Z, followed by the penalty increase.
    Y_i += rho*(H_i - Z);
    rho *= rho_multiplier;

    // error computation of this iteration: squared Frobenius norms are
    // summed across ranks, square-rooted only on the master below.
    double err = arma::norm(A_i-W_i*H_i,"fro"); err = sqr(err);
    double errZ = arma::norm(H_i-Z,"fro"); errZ = sqr(errZ);
    double total_err, total_errZ;
    reduce(*p_world, err, total_err, std::plus<double>(), 0);
    reduce(*p_world, errZ, total_errZ, std::plus<double>(), 0);

    // use master node to write log information
    // (total_err = global reconstruction error, total_errZ = global
    // consensus violation ||H_i - Z|| across all workers)
    if (IS_MASTER_PROCESS) {
      total_err = sqrt(total_err);
      total_errZ = sqrt(total_errZ);
      FileAccessAppend(log, logFile);
      log << "\t" << iter << "\t" << total_err << 
	"\t" << total_errZ << "\t" << TOTAL_SECONDS << ENDL;
      if (iter % 10 == 0 || iter == maxiter-1)
	MSG("iter=" << iter << " rho=" << rho << 
	    " err=" << total_err << " errZ=" << total_errZ << " d=" << duration_all());
    }
  }
}

