#ifndef PCPS_BFGS_HEADER
#define PCPS_BFGS_HEADER

#include <algorithm>

#include <src/pcps.h>
#include <src/input.h>
#include <src/blas_lapack.h>

namespace pcps {

//-------------------------------------------------------------------------------
// pcps::sd_step_search -- Takes a single step of length a from x0 along the
//                         direction p, then evaluates the target function and
//                         its gradient at the new point.
//
//                         Although the body contains a line-search loop, the
//                         curvature-based step-size adaptation is currently
//                         commented out, so the function returns after the
//                         first step and the step size a is left unchanged.
//
//  Inputs:
//
//    userinp   -- user input options (currently unused here)
//    worker    -- object that computes the target function's value and gradient
//    n         -- the dimension of the vectors x0, p, x1, g1, and g0
//    c1        -- parameter for the sufficient decrease condition (validated only)
//    c2        -- parameter for the curvature condition (validated only)
//    a         -- the step size
//    p         -- the search direction
//    x0        -- the starting point
//    g0        -- the gradient at x0
//    f0        -- the function value at x0 (currently unused here)
//
//  Outputs:
//
//    x1        -- on exit, the new point x0 + a * p
//    g1        -- on exit, the gradient at x1
//    f1        -- on exit, the target function value at x1
//
//-------------------------------------------------------------------------------

template <class WORKER>
inline void sd_step_search(const pcps::Input & userinp,
                           WORKER & worker,
                           const size_t n,
                           const double c1,
                           const double c2,
                           double & a,
                           const double * const p,
                           const double * const x0,
                           const double * const g0,
                           const double f0,
                                 double * x1,
                                 double * g1,
                                 double & f1) {

  // check sanity of input
  // (n is unsigned, so test for zero explicitly; the error messages now name
  //  this function rather than pcps::wolfe_search, from which they were copied)
  if ( n == 0 )
    throw pcps::Exception("expected n > 0 in pcps::sd_step_search");
  if ( a <= 0.0 )
    throw pcps::Exception("expected a > 0 in pcps::sd_step_search");
  if ( c1 >= 1.0 || c2 >= 1.0 || c1 <= 0.0 || c2 <= 0.0 || c1 >= c2)
    throw pcps::Exception("pcps::sd_step_search requires 0.0 < c1 < c2 < 1.0");

  // get MPI info
  const MPI::Comm & comm = MPI::COMM_WORLD;
  const int nproc = comm.Get_size();
  const int myrank = comm.Get_rank();

  // timer for the sub-steps below
  pcps::Stopwatch sw_sub;

  // initialize the Wolfe conditions
  // (retained for the commented-out curvature logic at the end of the loop)
  bool decr_cond = false;
  bool curv_cond = false;

  //// compute the inner product of the initial gradient and the search direction vector
  //const double initial_product = (myrank == 0 ? pcps::ddot(n, g0, 1, p, 1) : 0.0);

  // perform the line search
  // (in its current form the loop body always returns on its first pass)
  for (int iter = 0; iter < 100; iter++) {

    // print the step size
    if (myrank == 0)
      std::cout << boost::format("trying step length of %.9f") % a << std::endl
                << std::endl;

    sw_sub.reset(); sw_sub.start();

    // get x1 = x0 + a * p on the root rank
    if (myrank == 0) {
      pcps::dcopy(n, x0, 1, x1, 1);
      pcps::daxpy(n, a,  p, 1, x1, 1);
    } else {
      // non-root ranks alias x1 to the caller's x0 buffer (casting away const)
      // so the broadcast below fills it with the root's new position.
      // NOTE(review): this overwrites x0's storage on non-root ranks --
      // presumably only rank 0's copy is authoritative; confirm with callers.
      x1 = (double *)x0;
    }

    sw_sub.stop();
    if (myrank == 0)
      std::cout << boost::format("computing new position took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    sw_sub.reset(); sw_sub.start();

    // share the new position with all ranks
    comm.Bcast((void *)&x1[0], n, MPI::DOUBLE, 0);

    sw_sub.stop();
    if (myrank == 0)
      std::cout << boost::format("broadcasting new position took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    sw_sub.reset(); sw_sub.start();

    // get the value of the function at the new position
    f1 = worker.get_value(x1);

    sw_sub.stop();
    if (myrank == 0)
      std::cout << boost::format("computing energy took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    // compute the gradient at x1
    // (non-root ranks alias g1 to g0, as with x1 above)
    if (myrank != 0)
      g1 = (double *)g0;
    worker.get_gradient(x1, f1, g1);

    // the step-size adaptation below is disabled, so we are done after one step
    return;

//    // compute the inner product of the gradient at x1 with the search direction
//    const double current_product = (myrank == 0 ? pcps::ddot(n, g1, 1, p, 1) : 0.0);
//
//    // check the curvature condition
//    const double curv_decrease_factor = 0.3;
//    curv_cond = ( std::abs(current_product) < curv_decrease_factor * std::abs(initial_product) );
//    comm.Bcast((void *)&curv_cond, sizeof(bool), MPI::CHAR, 0);
//
//    // print the result of the curvature condition check
//    if (myrank == 0)
//      std::cout << boost::format("curvature condition: %s  c2 = %.6f  current slope = %12.4e  initial slope = %12.4e")
//                   % (curv_cond ? "PASSED" : "FAILED") % c2 % current_product % initial_product
//                << std::endl
//                << std::endl;
//
//    // if the curvature condition is satisfied then we are finished
//    if (curv_cond) return;
//
//    // otherwise update the step size
//    else if (myrank == 0) {
//      const double adjustment = 1.1;
//      if (current_product > curv_decrease_factor * std::abs(initial_product))
//        a /= adjustment;
//      else
//        a *= adjustment;
//    }

  }

  // raise errors if the conditions were not met
  // (unreachable while the early return above is in place)
  if (myrank == 0)
    throw pcps::Exception("Error in pcps::sd_step_search:  curvature condition not met");

}

//-------------------------------------------------------------------------------
// pcps::wolfe_search -- This function performs a line search starting at x0
//                       along the direction specified by p.
//                       Upon exit, the point x1 will satisfy the Wolfe
//                       conditions and the gradient of the target function at
//                       x1 will be stored in g1.
//
//  Inputs:
//
//    worker    -- object that computes the target function's value and gradient
//    n         -- the dimension of the vectors x0, p, x1, g1, and g0
//    c1        -- parameter for the sufficient decrease condition
//    c2        -- parameter for the curvature condition
//    a         -- the step size
//    p         -- the search direction
//    x0        -- the starting point for the line search
//    g0        -- the gradient at x0
//    f0        -- the function value at x0
//
//  Outputs:
//
//    x1        -- on exit, the endpoint of the line search
//    g1        -- on exit, the gradient at x1
//    f1        -- on exit, the target function value at x1
//
//
//-------------------------------------------------------------------------------

template <class WORKER>
inline void wolfe_search(const pcps::Input & userinp,
                         WORKER & worker,
                         const size_t n,
                         const double c1,
                         const double c2,
                         double & a,
                         const double * const p,
                         const double * const x0,
                         const double * const g0,
                         const double f0,
                               double * x1,
                               double * g1,
                               double & f1,
                         const bool print,
                         const int max_iter = 10,
                         const bool parallel = true) {

  // check sanity of input
  // (n is unsigned, so the first test only rejects n == 0)
  if ( n <= 0 )
    throw pcps::Exception("expected n > 0 in pcps::wolfe_search");
  if ( a <= 0.0 )
    throw pcps::Exception("expected a > 0 in pcps::wolfe_search");
  if ( c1 >= 1.0 || c2 >= 1.0 || c1 <= 0.0 || c2 <= 0.0 || c1 >= c2)
    throw pcps::Exception("pcps::wolfe_search requires 0.0 < c1 < c2 < 1.0");

  // get MPI info
  const MPI::Comm & comm = MPI::COMM_WORLD;
  const int nproc = comm.Get_size();
  const int myrank = comm.Get_rank();

  // initialize the Wolfe conditions (both must hold for the search to succeed)
  bool decr_cond = false;
  bool curv_cond = false;

  // compute the inner product of the initial gradient and the search direction vector
  // (in parallel mode only the root rank computes it; the condition booleans
  //  and the step size derived from it are broadcast to the other ranks below)
  const double initial_product = (myrank == 0 || !parallel ? pcps::ddot(n, g0, 1, p, 1) : 0.0);

  // perform the line search
  for (int iter = 0; iter < max_iter; iter++) {

    // print the step size
    if (myrank == 0 && print)
      std::cout << boost::format("trying step length of %.9f") % a << std::endl
                << std::endl;

    // get the trial point x1 = x0 + a * p (root rank, or every rank in serial mode)
    if (myrank == 0 || !parallel) {
      pcps::dscal(n, 0.0, x1, 1);
      pcps::daxpy(n, 1.0, x0, 1, x1, 1);
      pcps::daxpy(n,   a,  p, 1, x1, 1);
    } else {
      // non-root ranks alias x1 to the caller's x0 buffer (casting away const)
      // so the broadcast below fills it with the root's trial position.
      // NOTE(review): this overwrites x0's storage on non-root ranks --
      // presumably only rank 0's copy of x0 is authoritative; confirm.
      x1 = (double *)x0;
    }
    if (parallel)
      comm.Bcast((void *)&x1[0], n, MPI::DOUBLE, 0);
    //for (size_t i = 0; i < n; i++)
    //  x1[i] = x0[i] + a * p[i];

    // get the value of the function at the trial point
    f1 = worker.get_value(x1);

    // check the sufficient decrease (Armijo) condition
    decr_cond = ( f1 <= f0 + a * c1 * initial_product );
    if (parallel)
      comm.Bcast((void *)&decr_cond, sizeof(bool), MPI::CHAR, 0);

    // print the result of the sufficient decrease condition check
    if (myrank == 0 && print)
      std::cout << boost::format("decrease condition:  %s  c1 = %.6f  current value = %12.4e  initial value = %12.4e")
                   % (decr_cond ? "PASSED" : "FAILED") % c1 % f1 % f0
                << std::endl
                << std::endl;

    // if the sufficient decrease condition is satisfied, check the curvature condition
    if (decr_cond) {

      // compute the gradient at x1
      // (non-root ranks alias g1 to g0 -- as with x1 above, only the root's
      //  copy of the gradient stays distinct)
      if (myrank != 0 && parallel)
        g1 = (double *)g0;
      worker.get_gradient(x1, f1, g1);

      // compute the inner product of the gradient at x1 with the search direction
      const double current_product = (myrank == 0 || !parallel ? pcps::ddot(n, g1, 1, p, 1) : 0.0);

      // check the (weak Wolfe) curvature condition
      curv_cond = ( current_product >= c2 * initial_product );
      if (parallel)
        comm.Bcast((void *)&curv_cond, sizeof(bool), MPI::CHAR, 0);

      // print the result of the curvature condition check
      if (myrank == 0 && print)
        std::cout << boost::format("curvature condition: %s  c2 = %.6f  current slope = %12.4e  initial slope = %12.4e")
                     % (curv_cond ? "PASSED" : "FAILED") % c2 % current_product % initial_product
                  << std::endl
                  << std::endl;

      // if both conditions are satisfied, stop iterating
      if (curv_cond) {

        // heuristically adapt the step length for the caller's next line search:
        // if the final slope is still a sizable fraction of the initial slope,
        // nudge a down (positive slope => overshoot) or up by 10%
        if ( (myrank == 0  || !parallel) && std::abs( current_product / initial_product ) > 0.3 ) {
          if (current_product > 0.0)
            a /= 1.1;
          else
            a *= 1.1;
        }
        // cap the step length at 1 (runs on every rank; the broadcast below
        // then makes all ranks agree on the root's value)
        if (a > 1.0) a = 1.0;
        if (parallel)
          comm.Bcast((void *)&a, 1, MPI::DOUBLE, 0);

        // stop iterating
        break;

      }

      // if the curvature condition is not satisfied, increase the step size
      else {
        a *= 1.01;
        if (parallel)
          comm.Bcast((void *)&a, 1, MPI::DOUBLE, 0);
        if (myrank == 0 && print)
          std::cout << boost::format("Curvature condition not satisfied.  increasing step length.") << std::endl
                    << std::endl;
      }

    // if the sufficient decrease condition is not satisfied, decrease the step size
    } else {
      a *= 0.9;
      if (parallel)
        comm.Bcast((void *)&a, 1, MPI::DOUBLE, 0);
      if (myrank == 0 && print)
        std::cout << boost::format("Sufficient decrease condition not satisfied.  Decreasing step length.") << std::endl
                  << std::endl;
    }

  }

  //// return a bool telling whether the search was successful
  //return (decr_cond && curv_cond);

  // raise errors if the conditions were not met
  // NOTE(review): in parallel mode only the root rank throws; the other ranks
  // return normally even though they hold the same broadcast flags -- confirm
  // the callers keep the ranks in sync after a failure.
  if ( (myrank == 0 || !parallel) && !decr_cond)
    throw pcps::Exception("Error in pcps::wolfe_search:  sufficient decrease condition not met");
  if ( (myrank == 0 || !parallel) && !curv_cond)
    throw pcps::Exception("Error in pcps::wolfe_search:  curvature condition not met");

}

//-------------------------------------------------------------------------------
// pcps::bfgs -- This function performs a BFGS minimization of the
//               target function.
//
//  Inputs:
//
//    worker   -- object that computes the target function's value and gradient,
//                and also deals with some other bookkeeping
//    max_iter -- the maximum number of BFGS iterations to perform
//    n        -- the dimension of the vectors x0 and x1
//    x0       -- the initial guess.  upon exit, x0 is destroyed
//
//  Outputs:
//
//    x1       -- upon exit, the solution
//
//-------------------------------------------------------------------------------

template <class WORKER>
inline void bfgs(const pcps::Input & userinp,
                 WORKER & worker,
                 const int max_iter,
                 const size_t n,
                 double grad_thresh,
                 double * const x0,
                 double * const x1,
                 const bool print,
                 const double c1 = 0.0001,
                 const double c2 = 0.9,
                 const bool grad_by_ratio = false,
                 const int max_wolfe_iter = 10,
                 const bool parallel = true) {

  // get MPI info
  const MPI::Comm & comm = MPI::COMM_WORLD;
  const int nproc = comm.Get_size();
  const int myrank = comm.Get_rank();

  // allocate the approximate inverse hessian matrix and initialize it to the
  // identity matrix (in parallel mode the full n*n matrix lives on the root
  // rank only; other ranks get a length-1 placeholder)
  if (myrank == 0 && print)
    std::cout << "size of hessian = " << size_t(n) * size_t(n) * sizeof(double) << " bytes" << std::endl
              << std::endl;
  std::vector<double> inverse_hessian_vec((myrank == 0 || !parallel ? n*n : 1), 0.0);
  double * const h = &inverse_hessian_vec[0];
  if (myrank == 0 || !parallel)
    for (size_t i = 0; i < n; i++)
      h[i*n+i] = 1.0;

  // allocate search direction
  std::vector<double> search_direction_vec(n);
  double * const p = &search_direction_vec[0];

  // allocate gradients
  std::vector<double> grad_vec_0(n);
  std::vector<double> grad_vec_1(n);
  double * const g0 = &grad_vec_0[0];
  double * const g1 = &grad_vec_1[0];

  // prepare variables for the target function values
  double f0, f1;

  // compute the initial target function value
  f0 = worker.get_value(x0);

  // compute the initial gradient
  worker.get_gradient(x0, f0, g0);

  // compute and print the norm of the initial gradient; optionally rescale
  // the convergence threshold relative to it
  {
    const double grad_norm = std::sqrt( pcps::ddot(n, g0, 1, g0, 1) );
    if (grad_by_ratio)
      grad_thresh *= grad_norm;
    if (myrank == 0 && print)
      std::cout << boost::format("gradient norm = %.2e") % grad_norm << std::endl;
      //          << std::endl;
  }

  // choose initial step size
  double step_size = userinp.step_size();

  // iterate
  bool converged = false;
  for (int iter = 0; iter < max_iter; iter++) {

    // start a timer
    pcps::Stopwatch sw; sw.start();

    // print a greeting
    if (myrank == 0 && print)
      std::cout << boost::format("BFGS:  starting iteration %4i") % iter << std::endl
                << std::endl;

    // compute the search direction ( p = - h * g0 )
    if (myrank == 0 || !parallel) {
      for (size_t i = 0; i < n; i++) {
        p[i] = 0.0;
        for (size_t j = 0; j < n; j++)
          p[i] -= h[i*n+j] * g0[j];
      }
      //pcps::dgemm('N', 'N', n, 1, n, -1.0, h, n, g0, n, 0.0, p, n);
    }
    // NOTE(review): p is printed before the broadcast below, so in parallel
    // mode non-root ranks pass a not-yet-updated p here -- confirm
    // worker.print only acts on the root rank.
    if (print)
      worker.print("search direction", p);
    if (parallel)
      pcps::bcast(comm, p, n, MPI::DOUBLE, 0);
      //comm.Bcast((void *)p, n, MPI::DOUBLE, 0);

    // perform a line search to find a new point x1 that satisfies the Wolfe conditions and the gradient g1 at that point
    pcps::wolfe_search(userinp, worker, n, c1, c2, step_size, p, x0, g0, f0, x1, g1, f1, print, max_wolfe_iter, parallel);

    // perform any tasks requested to be done when a new position is found
    worker.process_new_point(x1);

    // print the new position
    if (print)
      worker.print("new position", x1);

    // check for convergence
    // (index is size_t to match n; the old int index mixed signed/unsigned
    //  arithmetic and could overflow for very large n)
    double grad_norm = 0.0;
    for (size_t i = 0; i < n; i++)
      grad_norm += g1[i] * g1[i];
    grad_norm = std::sqrt(grad_norm);
    if (myrank == 0 && print)
      std::cout << boost::format("gradient norm = %.2e") % grad_norm << std::endl;
      //          << std::endl;

    // if the gradient is small enough, stop iterating
    // NOTE(review): unlike pcps::lbfgs, the convergence flag is not broadcast
    // here; each rank decides from its own g1 -- confirm worker.get_gradient
    // leaves g1 consistent across ranks, or ranks could diverge.
    converged = (grad_norm < grad_thresh);
    if (converged) break;

    if (myrank == 0 || !parallel) {

      // replace the old gradient and position with the gradient and position
      // differences (x0 and g0 are restored from x1 and g1 after the hessian
      // update below)
      for (size_t i = 0; i < n; i++)
        x0[i] = x1[i] - x0[i];
      for (size_t i = 0; i < n; i++)
        g0[i] = g1[i] - g0[i];

      // replace the search direction with the product of the inverse hessian and the gradient difference
      for (size_t i = 0; i < n; i++) {
        p[i] = 0.0;
        for (size_t j = 0; j < n; j++)
          p[i] += h[i*n+j] * g0[j];
      }
      //pcps::dgemm('N', 'N', n, 1, n, 1.0, h, n, g0, n, 0.0, p, n);

      // compute the square norm of the gradient difference using the inverse hessian as a metric
      double gd_norm = 0.0;
      for (size_t i = 0; i < n; i++)
        gd_norm += g0[i] * p[i];

      // compute the inverse of the inner product of the gradient and position
      // differences (no safeguard against a zero or negative curvature product)
      double r = 0.0;
      for (size_t i = 0; i < n; i++)
        r += g0[i] * x0[i];
      r = 1.0 / r;

      // update the inverse hessian matrix (standard BFGS inverse update)
      for (size_t i = 0; i < n; i++)
        for (size_t j = 0; j < n; j++)
          h[i*n+j] = h[i*n+j] - r * ( p[i] * x0[j] + x0[i] * p[j] )
                              + r * r * x0[i] * gd_norm * x0[j]
                              + r * x0[i] * x0[j];
    }

    // move the new gradient and position into the old gradient and position
    f0 = f1;
    std::memcpy((void *)x0, (const void *)x1, size_t(n) * sizeof(double));
    std::memcpy((void *)g0, (const void *)g1, size_t(n) * sizeof(double));

    //// increase the step size if it is less than 1
    //step_size *= 1.1;
    //if (step_size > 1.0) step_size = 1.0;

    // report the elapsed time
    sw.stop();
    if (myrank == 0 && print)
      std::cout << boost::format("BFGS iteration took %.2f seconds") % sw.elapsed_seconds() << std::endl
                << std::endl;

  }

  // warn if the method did not converge
  if (myrank == 0 && !converged)
    std::cout << "Warning: BFGS method did not converge" << std::endl;

}

// Rotates the elements of vec down by one slot: vec[0] moves to the back and
// every other element shifts one position toward the front.  Used to age out
// the oldest entry in the L-BFGS history buffers.
//
// The guard makes the call safe for vectors with fewer than two elements: the
// previous hand-rolled loop evaluated vec.size()-1, which wraps around for an
// empty vector (unsigned underflow) and then indexed out of bounds.
template <class T> inline void lbfgs_shift_down(std::vector<T> & vec) {
    if (vec.size() > 1)
      std::rotate(vec.begin(), vec.begin() + 1, vec.end());
}

//-------------------------------------------------------------------------------
// pcps::lbfgs -- This function performs an L-BFGS minimization of the
//                target function.
//
//  Inputs:
//
//    worker   -- object that computes the target function's value and gradient,
//                and also deals with some other bookkeeping
//    n        -- the dimension of the vectors x0 and x1
//    x0       -- the initial guess.  upon exit, x0 is destroyed
//
//  Outputs:
//
//    x1       -- upon exit, the solution
//
//-------------------------------------------------------------------------------

template <class WORKER>
inline void lbfgs(const pcps::Input & userinp,
                  WORKER & worker,
                  const int max_iter,
                  const size_t n,
                  double grad_thresh,
                  double * const x0,
                  double * const x1,
                  const bool print = true,
                  const double c1 = 0.0001,
                  const double c2 = 0.9,
                  const bool grad_by_ratio = false,
                  const int max_wolfe_iter = 10) {

  // get MPI info
  const MPI::Comm & comm = MPI::COMM_WORLD;
  const int nproc = comm.Get_size();
  const int myrank = comm.Get_rank();

  // choose how many previous gradient/position difference pairs to remember
  const int m = userinp.bfgs_length();

  // allocate storage for the gradient and position differences
  // (only the root rank holds the full m*n history; other ranks get a
  //  length-1 placeholder and leave the pointer vectors unset)
  boost::shared_array<double> grad_diff_vec = pcps::allocate_shared_array<double>((myrank == 0 ? size_t(n) * size_t(m) : size_t(1)), 0.0);
  boost::shared_array<double>  pos_diff_vec = pcps::allocate_shared_array<double>((myrank == 0 ? size_t(n) * size_t(m) : size_t(1)), 0.0);
  std::vector<double *> grad_diffs(m);
  std::vector<double *> pos_diffs(m);
  if (myrank == 0) {
    for (size_t i = 0; i < size_t(m); i++) {
      grad_diffs[i] = &grad_diff_vec[i*n];
      pos_diffs[i] = &pos_diff_vec[i*n];
    }
  }

  // allocate gradients (g0 is full length on every rank; g1 only on the root)
  boost::shared_array<double> grad_vec_0 = pcps::allocate_shared_array<double>(n, 0.0);
  boost::shared_array<double> grad_vec_1 = pcps::allocate_shared_array<double>((myrank == 0 ? n : 1), 0.0);
  double * g0 = &grad_vec_0[0];
  double * g1 = &grad_vec_1[0];

  // allocate search direction (full length on the root rank only)
  boost::shared_array<double> search_direction_vec = pcps::allocate_shared_array<double>((myrank == 0 ? n : 1), 0.0);
  double * p = &search_direction_vec[0];

  // allocate q vector (two-loop recursion workspace, root rank only)
  boost::shared_array<double> q_vec = pcps::allocate_shared_array<double>((myrank == 0 ? n : 1), 0.0);
  double * q = &q_vec[0];

  // allocate z vector (two-loop recursion workspace, root rank only)
  boost::shared_array<double> z_vec = pcps::allocate_shared_array<double>((myrank == 0 ? n : 1), 0.0);
  double * z = &z_vec[0];

  // allocate r (the 1/(y.s) curvature factors) and a (alpha coefficient) vectors
  std::vector<double> r_vec(m, 0.0);
  std::vector<double> a_vec(m, 0.0);

  // prepare variables for the target function values
  double f0, f1;

  // make sure every rank starts from the root's x0, then compute the initial
  // target function value
  comm.Bcast((void *)&x0[0], n, MPI::DOUBLE, 0);
  f0 = worker.get_value(x0);

  // compute the initial gradient
  worker.get_gradient(x0, f0, g0);

  // compute and print the norm of the initial gradient; optionally rescale
  // the convergence threshold relative to it (root rank only)
  if (myrank == 0) {
    const double grad_norm = std::sqrt( pcps::ddot(n, g0, 1, g0, 1) );
    if (print)
      std::cout << boost::format("gradient norm = %.2e") % grad_norm << std::endl
                << std::endl;
    if (grad_by_ratio)
      grad_thresh *=  grad_norm;
  }

  // choose initial step size
  double step_size = userinp.step_size();

  // iterate
  bool converged = false;
  int m_used = 0;  // number of valid history pairs accumulated so far (<= m)
  for (int iter = 0; iter < max_iter; iter++) {

    // start a timer for the total iteration time
    pcps::Stopwatch sw; sw.start();

    // prepare a timer for sub-iteration timings
    pcps::Stopwatch sw_sub;

    // print a greeting
    if (myrank == 0 && print)
      std::cout << boost::format("L-BFGS:  starting iteration %4i") % iter << std::endl
                << std::endl;

    sw_sub.reset(); sw_sub.start();

    // compute the search direction on the root process ( p = - h * g0 ) using
    // the L-BFGS two-loop recursion; history is stored oldest-first with the
    // newest pair at index m-1, and only the m_used most recent slots are valid
    if (myrank == 0) {
      if (m > 0) {
        // q = g0
        pcps::dscal(n, 0.0, q, 1);
        pcps::daxpy(n, 1.0, g0, 1, q, 1);
        // first loop (newest to oldest):  q -= alpha_i * y_i
        for (int i = m-1; i >= m - m_used; i--) {
          a_vec[i] = r_vec[i] * pcps::ddot(n, pos_diffs[i], 1, q, 1);
          pcps::daxpy(n, -a_vec[i], grad_diffs[i], 1, q, 1);
        }
        // z = h_0 * q
        pcps::dscal(n, 0.0, z, 1);
        pcps::daxpy(n, 1.0, q, 1, z, 1);  // assumes h_0 = identity
        // second loop (oldest to newest):  z += (alpha_i - beta_i) * s_i
        for (int i = m - m_used; i < m; i++) {
          const double b = r_vec[i] * pcps::ddot(n, grad_diffs[i], 1, z, 1);
          pcps::daxpy(n, a_vec[i]-b, pos_diffs[i], 1, z, 1);
        }
        // p = -z
        pcps::dscal(n, 0.0, p, 1);
        pcps::daxpy(n, -1.0, z, 1, p, 1);
      } else {
        // no history requested: plain steepest descent direction
        pcps::dcopy(n, g0, 1, p, 1);
        pcps::dscal(n, -1.0, p, 1);
      }
      // print the search direction
      worker.print("search direction", p);
    }

    sw_sub.stop();
    if (myrank == 0 && print)
      std::cout << boost::format("prepare search direction took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    //// broadcast the search direction to all processes
    //pcps::bcast(comm, p, n, MPI::DOUBLE, 0);
    ////comm.Bcast((void *)p, n, MPI::DOUBLE, 0);

    // perform a line search to find a new point x1 that satisfies the Wolfe conditions and the gradient g1 at that point
    // (with no history the fixed-step sd_step_search is used instead)
    if (m == 0)
      pcps::sd_step_search(userinp, worker, n, c1, c2, step_size, p, x0, g0, f0, x1, g1, f1);
    else 
      pcps::wolfe_search(userinp, worker, n, c1, c2, step_size, p, x0, g0, f0, x1, g1, f1, print, max_wolfe_iter);

    // if necessary, increase the precision with which the values are calculated
    worker.update_value_precision(f0, f1);

    // perform any tasks requested to be done when a new position is found
    if (myrank == 0)
      worker.process_new_point(x1);

    // print the new position
    if (myrank == 0)
      worker.print("new position", x1);

    sw_sub.reset(); sw_sub.start();

    // compute and print the norm of the new gradient (root rank only;
    // other ranks carry 0.0 and rely on the converged broadcast below)
    const double grad_norm = (myrank == 0 ? std::sqrt( pcps::ddot(n, g1, 1, g1, 1) ) : 0.0);
    if (myrank == 0 && print)
      std::cout << boost::format("gradient norm = %.2e") % grad_norm << std::endl
                << std::endl;

    // if the gradient norm exceeds the user-supplied cap, zero the gradient.
    // NOTE(review): this also zeroes the g1 that feeds the history update
    // below, which makes the next 1/(y.s) factor ill-conditioned --
    // presumably intended as a safeguard against wild steps; confirm.
    if (myrank == 0 && grad_norm > userinp.max_grad_norm())
      pcps::dscal(n, 0.0, g1, 1);

    sw_sub.stop();
    if (myrank == 0 && print)
      std::cout << boost::format("gradient norm took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    sw_sub.reset(); sw_sub.start();

    // if the gradient is small enough, stop iterating
    // (the root's verdict is broadcast so all ranks leave the loop together)
    converged = (grad_norm < grad_thresh);
    comm.Bcast((void *)&converged, sizeof(bool), MPI::CHAR, 0);
    if (converged) break;

    sw_sub.stop();
    if (myrank == 0 && print)
      std::cout << boost::format("broadcasting norm took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    // update the history of gradient differences, position differences, and r values
    // (the oldest pair is rotated out; the new pair goes to slot m-1)
    if (myrank == 0 && m > 0) {

      lbfgs_shift_down(pos_diffs);
      pcps::dscal(n, 0.0, pos_diffs[m-1], 1);
      pcps::daxpy(n, 1.0, x1, 1, pos_diffs[m-1], 1);
      pcps::daxpy(n,-1.0, x0, 1, pos_diffs[m-1], 1);
      //for (size_t j = 0; j < n; j++)
      //  pos_diffs[m-1][j] = x1[j] - x0[j];

      lbfgs_shift_down(grad_diffs);
      pcps::dscal(n, 0.0, grad_diffs[m-1], 1);
      pcps::daxpy(n, 1.0, g1, 1, grad_diffs[m-1], 1);
      pcps::daxpy(n,-1.0, g0, 1, grad_diffs[m-1], 1);
      //for (size_t j = 0; j < n; j++)
      //  grad_diffs[m-1][j] = g1[j] - g0[j];

      // r = 1 / (y . s); note there is no safeguard here against a zero or
      // negative curvature product
      lbfgs_shift_down(r_vec);
      r_vec[m-1] = 1.0 / pcps::ddot(n, grad_diffs[m-1], 1, pos_diffs[m-1], 1);

      m_used = std::min(m, m_used + 1);

    }

    sw_sub.reset(); sw_sub.start();

    // move the new gradient, position, and function value into the old gradient, position, and function value
    f0 = f1;
    if (myrank == 0) {
      pcps::dcopy(n, x1, 1, x0, 1); // x0 = x1
      pcps::dcopy(n, g1, 1, g0, 1); // g0 = g1
    }

    sw_sub.stop();
    if (myrank == 0 && print)
      std::cout << boost::format("copying (x1, g1) to (x0, g0) took %.4f seconds") % sw_sub.elapsed_seconds() << std::endl;

    // report the elapsed time for this iteration
    sw.stop();
    if (myrank == 0 && print)
      std::cout << boost::format("L-BFGS iteration took %.2f seconds") % sw.elapsed_seconds() << std::endl
                << std::endl;

  }

  // report whether the method converged
  if (myrank == 0 && print) {
    if (converged)
      std::cout << boost::format("L-BFGS method converged!") << std::endl;
    else
      std::cout << boost::format("L-BFGS method did not converge...") << std::endl;
    std::cout << std::endl;
  }

}

} // end namespace pcps

#endif
