#ifndef PCPS_ACCUMULATOR_STOCH_RECON_HEADER
#define PCPS_ACCUMULATOR_STOCH_RECON_HEADER

#include <src/pcps.h>
#include <src/input.h>
#include <src/accumulator.h>
#include <src/accumulator_energy.h>
#include <src/wavefunction.h>
#include <src/matrix_actor.h>
#include <src/conjugate_gradient.h>
#include <src/disk_io.h>

namespace pcps {

  //-----------------------------------------------------------------------------------
  // pcps::BlockManager -- Class for managing blocks of data.
  //-----------------------------------------------------------------------------------

  class BlockManager {

    private:

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // data members
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      const size_t _block_size;
      const size_t _array_size;
      const bool _use_disk;
      size_t _pos;
      char * _ptr;
      boost::shared_array<char> _fg_block;
      boost::shared_array<char> _bg_block;
      boost::shared_array<char> _wk_block;
      pcps::Disk_IO_Manager & _diom;
      std::map<std::string, boost::shared_array<char> > _block_storage;
      pthread_mutex_t _mut;
      pthread_cond_t _con;
      bool _disk_flag;
      std::string _bg_name;

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to return the buffer size to pad the data blocks with
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      static size_t buffer_size() { return 100*1024; }

    public:

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // constructor
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      BlockManager(const size_t block_size, pcps::Disk_IO_Manager & diom, const bool use_disk)
        : _block_size(block_size),
          _array_size(block_size + 2 * this->buffer_size()),
          _diom(diom),
          _use_disk(use_disk)
      {

        // initialize mutex and condition variables
        pthread_mutex_init(&_mut, NULL);
        pthread_cond_init(&_con, NULL);

        // allocate the data arrays
        _fg_block = pcps::allocate_shared_array<char>(_array_size);
        if (_use_disk)
          _bg_block = pcps::allocate_shared_array<char>(_array_size);

        // set the "working block"
        _wk_block = _fg_block;

        // set the data pointer and position counter
        this->reset();

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // destructor
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      ~BlockManager() {

        // destroy mutex and condition variables
        pthread_mutex_destroy(&_mut);
        pthread_cond_destroy(&_con);

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to reset the data pointer and position counter
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void reset() {
        _pos = 0;
        _ptr = _fg_block.get() + this->buffer_size();
      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to advance past a chunk of data, returning a pointer to the beginning of the chunk
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      template <class T> T * get(size_t n) {
        T * retval = (T *)_ptr;
        const size_t extra = ( 8 - n * sizeof(T) % 8 ) % 8;
        _pos += n * sizeof(T) + extra;
        _ptr += n * sizeof(T) + extra;
        if ( _pos > _block_size )
          throw pcps::Exception( (boost::format("block overrun in pcps::BlockManager::get")).str() );
        return retval;
      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to return the block size
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      size_t block_size() const { return _block_size; }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to tell how much space is left in the block
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      size_t remaining() const { return _block_size - _pos; }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to store the current block
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void store_block(const std::string & filename) {

        // disk based storage
        if (_use_disk) {

          // wait for background array to be available
          this->wait_for_bg();

          // swap foreground and background arrays
          std::swap(_fg_block, _bg_block);

          // set the name of the file to write to
          _bg_name = filename;

          // have the disk manager write the background block to disk in the background
          _diom.schedule_task('w', _array_size, _bg_block.get(), &_disk_flag, &_mut, &_con, &_bg_name);

        // in-memory storage
        } else {

          // if there is no block stored with the given file name, create one
          if ( _block_storage.count(filename) == 0 )
            _block_storage[filename] = pcps::allocate_shared_array<char>(_array_size);

          // store the data (no data is actually moved, we just exchange the pointers)
          std::swap(_fg_block, _block_storage[filename]);

        }

        // reset the position and pointer
        this->reset();

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to begin fetching a block of data
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void prefetch(const std::string & filename) {

        // disk based storage
        if (_use_disk) {

          // wait for background array to be available
          this->wait_for_bg();

          // set the name of the file to write to
          _bg_name = filename;

          // have the disk manager read the data into the background block in the background
          _diom.schedule_task('r', _array_size, _bg_block.get(), &_disk_flag, &_mut, &_con, &_bg_name);

        // in-memory storage (no prefetching necessary)
        } else {

        }

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to load a block that has already been prefetched
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void load_from_prefetch(const std::string & filename) {

        // disk based storage
        if (_use_disk) {

          // wait for background array to be available
          this->wait_for_bg();

          // make sure the filename is the one we prefetched
          if ( filename != _bg_name )
            throw pcps::Exception((boost::format("name does not match prefetched name in pcps::BlockManager::load_from_prefetch")).str());

          // swap the foreground and background arrays
          std::swap(_fg_block, _bg_block);

        // in-memory storage (no prefetching necessary)
        } else {

          // if there is no block stored with the given file name, raise an error
          if ( _block_storage.count(filename) == 0 )
            throw pcps::Exception( ( boost::format("could not find block with name \"%s\" in pcps::BlockManager::load_from_prefetch")
                                     % filename ).str() );

          // load the data block
          _fg_block = _block_storage[filename];

        }

        // reset the position and pointer
        this->reset();

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to save the current block for later
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void save_working_block() {

        // disk based storage (working block not used)
        if (_use_disk) {

        // in-memory storage
        } else {

          // set the current block as the working block
          _wk_block = _fg_block;

        }

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to load the "working block"
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void load_working_block() {

        // disk based storage (working block not used)
        if (_use_disk) {

        // in-memory storage
        } else {

          // set the current block as the working block
          _fg_block = _wk_block;

        }

        // reset the position and pointer
        this->reset();

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to wait for the disk manager to finish with the background array
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void wait_for_bg() {
        pthread_mutex_lock(&_mut);
        if (!_disk_flag)
          pthread_cond_wait(&_con, &_mut);
        if (!_disk_flag)
          throw pcps::Exception( (boost::format("disk i/o did not complete properly in pcps::BlockManager::wait_for_bg")).str() );
        pthread_mutex_unlock(&_mut);
      }

  };

  // forward declaration of thread accumulator
  template <class S, class PART, class REF> class StochReconThreadAccum;

  //-----------------------------------------------------------------------------------
  // pcps::StochReconProcessAccum -- Accumulates one process's data needed for
  //                                 multiplying by the first derivative basis's
  //                                 overlap matrix and updates the wavefunction using
  //                                 the solution to the stochastic reconfiguration
  //                                 equation.
  //-----------------------------------------------------------------------------------

  template <class S, class PART, class REF> class StochReconProcessAccum : public pcps::ProcessAccum<S, PART, REF> {

    private:

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // data members
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      const pcps::OptSettings _os;                 // settings for wavefunction optimization
      const double _shift;                         // shift to apply to the overlap matrix's diagonal
      const double _thresh;                        // convergence threshold for conjugate gradient iterations
      const bool _diag_overlap_precon;             // whether to use the overlap matrix's diagonal as a preconditioner for conjugate gradient
      const bool _use_disk;                        // whether to use disk based storage for the derivative ratios
      const int _max_cg_iter;                      // maximum number of conjugate gradient iterations
      const size_t _block_size;                    // size in bytes of each block of derivative ratio data
      const std::string _workdir;                  // the working directory to store scratch files in
      bool _cg_converged;                          // flag to tell when conjugate gradient iterations have converged
      std::vector<int> _corr_n_elements;           // array for storing the number of elements in each variable correlator
      std::vector<int> _corr_offsets;              // positions of each correlator's variables in the ordering of all wavefunction variables
      std::vector<S> _process_d_vec;               // array for storing the conjugate gradient direction vector (also holds flag variable at end)
      std::vector<S> _process_y_vec;               // array for storing the vector resulting from the matrix's action on the direction vector
      std::vector<S> _process_b_vec;               // array for storing the b vector for the linear equation
      std::vector<S> _process_x_vec;               // array for storing the solution to the linear equation
      std::vector<S> _process_overlap_diag;        // array for storing the diagonal of the overlap matrix
      pcps::Disk_IO_Manager _diom;                 // object to manage input and output to the disk

    public:

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // give the corresponding thread accumulator access to private and protected members
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      friend class pcps::StochReconThreadAccum<S, PART, REF>;

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // constructor
      //
      // userinp         user input settings
      // wfn             the wavefunction being optimized
      // sample_length   number of samples accumulated per thread
      //
      // Note: the initializer list is kept in member declaration order (the order in which C++
      // actually initializes members), and _cg_converged is explicitly initialized -- previously
      // it was left indeterminate.
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      StochReconProcessAccum(const pcps::Input & userinp, const pcps::Wavefunction<S> & wfn, const long int sample_length)
        : pcps::ProcessAccum<S, PART, REF>(userinp, sample_length),
          _os(userinp, wfn),
          _shift(userinp.overlap_shift()),
          _thresh(userinp.cg_thresh()),
          _diag_overlap_precon(userinp.precondition_sr()),
          _use_disk(userinp.sr_use_disk()),
          _max_cg_iter(userinp.max_cg_iter()),
          _block_size( size_t(userinp.sr_disk_block_size()) * size_t(1024) * size_t(1024) ),
          _workdir(userinp.workdir()),
          _cg_converged(false),
          _diom(userinp.sr_use_disk())
      {

        // initialize vectors (correlator bookkeeping arrays start at -1 so we can verify below
        // that every entry was filled in)
        const S z = pcps::zero<S>();
        if (_os.opt_cor) {
          _corr_n_elements.assign( size_t(_os.n_var_corr), -1);
          _corr_offsets.assign(    size_t(_os.n_var_corr), -1);
        }
        _process_d_vec.assign(         size_t(_os.dim+1)     , z);  // one extra element for the flag variable
        _process_y_vec.assign(         size_t(_os.dim)       , z);
        _process_b_vec.assign(         size_t(_os.dim)       , z);
        _process_x_vec.assign(         size_t(_os.dim)       , z);
        if (_diag_overlap_precon)
          _process_overlap_diag.assign(  size_t(_os.dim)       , z);

        // get the number of elements in each variable correlator and the correlator's offset
        if (_os.opt_cor) {

          // first process non-fixed, non-ti correlators, assigning each a contiguous range of
          // variable positions
          int c = 0;
          int offset = 0;
          for (typename std::vector<pcps::Correlator<S> >::const_iterator corr = wfn.correlators().begin(); corr != wfn.correlators().end(); ++corr) {
            if (!corr->fixed()) {
              if (!corr->ti()) {
                _corr_n_elements.at(c) = corr->nelements();
                _corr_offsets.at(c) = offset;
                offset += corr->nelements();
              }
              c++;
            }
          }
          assert( offset == _os.n_cor_var );

          // now process ti (translationally invariant) correlators, which share the offsets and
          // element counts of their parent correlators
          c = 0;
          for (typename std::vector<pcps::Correlator<S> >::const_iterator corr = wfn.correlators().begin(); corr != wfn.correlators().end(); ++corr) {
            if (!corr->fixed()) {
              if (corr->ti()) {
                const int x = wfn.get_ti_parent_index(corr->ti_sites());
                // x indexes all correlators, but our arrays index only non-fixed ones, so
                // subtract the number of fixed correlators preceding the parent
                int y = 0;
                for (int i = 0; i < x; i++)
                  if (wfn.correlators().at(i).fixed())
                    y++;
                _corr_offsets.at(c) = _corr_offsets.at(x-y);
                _corr_n_elements.at(c) = _corr_n_elements.at(x-y);
              }
              c++;
            }
          }

          // check that we initialized all the element numbers and offsets
          for (int i = 0; i < _os.n_var_corr; i++) {
            if ( _corr_offsets.at(i) < 0 )
              throw pcps::Exception( (boost::format("error in stochastic reconfiguration accumulator. _corr_offsets[%i] initialized to %i.")
                                      % i % _corr_offsets.at(i) ).str() );
            if ( _corr_n_elements.at(i) <= 0 )
              throw pcps::Exception( (boost::format("error in stochastic reconfiguration accumulator. _corr_n_elements[%i] initialized to %i.")
                                      % i % _corr_n_elements.at(i) ).str() );
          }

        }

        // reset data
        this->reset();

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to create a thread accumulator linked to this process accumulator
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      pcps::ThreadAccum<S, PART, REF> * create_thread_accum(const int tid, pcps::Arena & arena);

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to reset the accumulator
      // (zeroes the first _os.dim elements of each work vector; the trailing flag element of
      // _process_d_vec is deliberately left untouched)
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void reset() {
        pcps::xscal(_os.dim, pcps::zero<S>(), &_process_d_vec.at(0),        1);
        pcps::xscal(_os.dim, pcps::zero<S>(), &_process_y_vec.at(0),        1);
        pcps::xscal(_os.dim, pcps::zero<S>(), &_process_b_vec.at(0),        1);
        pcps::xscal(_os.dim, pcps::zero<S>(), &_process_x_vec.at(0),        1);
        if (_diag_overlap_precon)
          pcps::xscal(_os.dim, pcps::zero<S>(), &_process_overlap_diag.at(0), 1);
      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to update the wavefunction using the solution to the stochastic reconfiguration linear equation
      //
      // userinp   user input settings
      // wfn       the wavefunction to update (modified in place)
      //
      // The root MPI process applies the update, then the new variables are broadcast to all
      // processes.  _process_x_vec is restored to its original contents before returning.
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void update_wfn(const pcps::Input & userinp, pcps::Wavefunction<S> & wfn) {

        // get MPI info
        const MPI::Comm & comm = MPI::COMM_WORLD;
        const int nproc = comm.Get_size();
        const int myrank = comm.Get_rank();

        // get a short name for the number of variables
        const int n = _os.dim;

        // root node computes and applies the update
        if (myrank == 0) {

          // scale the result by the timestep (imaginary step for real time evolution)
          const S tau = _os.step_size * ( userinp.optimization() == pcps::keyword::Opt_Stochastic_Real_Time
                                               ? pcps::imaginary_unity<S>() : -pcps::unity<S>() );
          pcps::xscal(n, tau, &_process_x_vec.at(0), 1);

          // not implemented for real time evolution
          if ( userinp.optimization() == pcps::keyword::Opt_Stochastic_Real_Time )
            throw pcps::Exception( (boost::format("stochastic reconfiguration not fully implemented for real time evolution")).str() );

          // alter the weight of the old wavefunction to be one by rescaling the update vector
          _process_x_vec.at(0) += pcps::unity<S>();
          assert( std::abs(_process_x_vec.at(0)) > 1.0e-3 );
          pcps::xscal(n-1, 1.0 / _process_x_vec.at(0), &_process_x_vec.at(1), 1);

          // update the correlators (ti correlators follow their parents and are not updated directly)
          if (_os.opt_cor) {
            int c = 0;
            for (typename std::vector<pcps::Correlator<S> >::iterator corr = wfn.correlators().begin(); corr != wfn.correlators().end(); ++corr)
              if (!corr->fixed()) {
                if (!corr->ti())
                  pcps::xaxpy(corr->nelements(), pcps::unity<S>(), &_process_x_vec.at(1 + _corr_offsets.at(c)), 1, corr->data_ptr(), 1);
                c++;
              }
          }

          // update the agp weights
          if (_os.opt_agp)
            pcps::xaxpy(_os.n_agp_var, pcps::unity<S>(), &_process_x_vec.at(1 + _os.n_cor_var), 1, &wfn.agp_weights()[0], 1);

          // update the orbital coefficients
          if (_os.opt_orb)
            pcps::xaxpy(_os.n_orb_var, pcps::unity<S>(), &_process_x_vec.at(1 + _os.n_cor_var + _os.n_agp_var), 1, &wfn.restricted_orb_coeffs()[0], 1);

          // update the pairing matrix
          if (_os.opt_par)
            pcps::xaxpy(_os.n_par_var, pcps::unity<S>(), &_process_x_vec.at(1 + _os.n_cor_var), 1, &wfn.pairing_matrix()[0], 1);

          // remove timestep and other alterations from the solution vector, restoring it to its
          // pre-update contents (inverse of the operations applied above)
          pcps::xscal(n-1, _process_x_vec.at(0), &_process_x_vec.at(1), 1);
          _process_x_vec.at(0) -= pcps::unity<S>();
          pcps::xscal(n, -pcps::unity<S>() / tau, &_process_x_vec.at(0), 1);

        }

        // broadcast wavefunction variables to all processes (raw bytes, hence MPI::CHAR counts)
        if (_os.opt_cor) {
          wfn.to_vector(userinp, (double *)&_process_b_vec.at(0));
          comm.Bcast(&_process_b_vec.at(0), _os.dim * sizeof(S), MPI::CHAR, 0);
          wfn.from_vector(userinp, (double *)&_process_b_vec.at(0));
        }
        if (_os.opt_agp)
          comm.Bcast(&wfn.agp_weights()[0], wfn.agp_weights().size() * sizeof(S), MPI::CHAR, 0);
        if (_os.opt_orb)
          comm.Bcast(&wfn.restricted_orb_coeffs()[0], wfn.restricted_orb_coeffs().size() * sizeof(S), MPI::CHAR, 0);
        if (_os.opt_par)
          comm.Bcast(&wfn.pairing_matrix()[0], wfn.pairing_matrix().size() * sizeof(S), MPI::CHAR, 0);

        // if requested, force the pairing matrix to be symmetric
        if (_os.opt_par && userinp.symmetric_pairing() && userinp.ref_type() == pcps::keyword::Ref_Pairing)
          wfn.symmetrize_pairing_matrix();

      }

  };

  //-----------------------------------------------------------------------------------
  // pcps::StochReconThreadAccum -- Accumulates one thread's data needed for
  //                                multiplying by the first derivative basis's overlap
  //                                matrix and solves the stochastic reconfiguration
  //                                linear equation iteratively by the conjugate
  //                                gradient method without actually building the
  //                                overlap matrix.
  //-----------------------------------------------------------------------------------

  template <class S, class PART, class REF> class StochReconThreadAccum : public pcps::ThreadAccum<S, PART, REF>,
                                                                               public pcps::MatrixActor<S> {

    private:

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // pointer to the corresponding process accumulator
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      pcps::StochReconProcessAccum<S, PART, REF> * const _srpacc;

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // other data members
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      const pcps::OptSettings _os;    // settings for wavefunction optimization
//      const size_t _disk_max_accept;  // number of accepts before the current disk block must be written to disk
      long int _n_accept;             // the number of configruations that have been visited
//      size_t _n_disk_blocks;          // the number of blocks of derivative data stored on disk
//      size_t _next_block;             // the number of the next disk block to load
      double * _counts;               // array for the number of times each configuration was visited
//      std::vector<boost::shared_array<char> > _blocks;
//      size_t * _block_lengths;        // array for the number of configurations' derivatives stored in each disk block
//      int    * _cor_der_indices;      // array for this clone's correlator derivative indices
//      S * _cor_der_values;       // array for this clone's correlator derivative values
//      S * _agp_der_values;       // array for this clone's agp weight derivative values
//      S * _orb_der_values;       // array for this clone's orbital coefficient derivative values
//      S * _par_der_values;       // array for this clone's pairing matrix derivative values
      S * _y_vec;                // array for use in constructing and solving the linear equation
      S * _r_vec;                // array for use in computing the overlap's diagonal and solving the linear equation
      S * _diag_temp;            // array used in the construction of the overlap matrix's diagonal
//      S * _disk_block_a;         // array used for reading/writing derivative data from/to the disk
//      S * _disk_block_b;         // array used for reading/writing derivative data from/to the disk
//      pthread_mutex_t _mutex_a;       // mutex for _disk_block_a
//      pthread_mutex_t _mutex_b;       // mutex for _disk_block_b
//      pthread_cond_t _cond_a;         // condition variable for _disk_block_a
//      pthread_cond_t _cond_b;         // condition variable for _disk_block_b
//      bool _flag_a;                   // flag for _disk_block_a
//      bool _flag_b;                   // flag for _disk_block_b
//      std::string _fname_a;           // file name for _disk_block_a
//      std::string _fname_b;           // file name for _disk_block_b
//      pthread_mutex_t * _mutex_ptr_a; // pointer to mutex for _disk_block_a
//      pthread_mutex_t * _mutex_ptr_b; // pointer to mutex for _disk_block_b
//      pthread_cond_t * _cond_ptr_a;   // pointer to condition variable for _disk_block_a
//      pthread_cond_t * _cond_ptr_b;   // pointer to condition variable for _disk_block_b
//      bool * _flag_ptr_a;             // pointer to flag for _disk_block_a
//      bool * _flag_ptr_b;             // pointer to flag for _disk_block_b
//      std::string * _fname_ptr_a;     // pointer to file name for _disk_block_a
//      std::string * _fname_ptr_b;     // pointer to file name for _disk_block_b
      const size_t _bytes_per_accept;   // bytes of derivative ratio data that must be stored for each accepted configuration
      size_t _block_num;                // index of the current derivative ratio block
      size_t _n_blocks;                 // number of derivative ratio blocks
      pcps::BlockManager _blocker;      // object to manage blocks of derivative ratio data

    public:

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // constructor
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // srpacc   the process accumulator this thread accumulator feeds into
      // tid      this thread's index (thread 0 gets the extra _r_vec work array)
      // arena    arena from which the work arrays are allocated
      //
      // Note: the initializer list is kept in member declaration order (the order in which C++
      // actually initializes members), and the raw array pointers are NULL-initialized so that
      // members whose allocation conditions are false are not left indeterminate.  The dead
      // commented-out code from the pre-BlockManager disk implementation has been removed.
      StochReconThreadAccum(pcps::StochReconProcessAccum<S, PART, REF> * const srpacc, const int tid, pcps::Arena & arena)
        : pcps::ThreadAccum<S, PART, REF>(srpacc, tid),
          _srpacc(srpacc),
          _os(srpacc->_os),
          _counts(NULL),
          _y_vec(NULL),
          _r_vec(NULL),
          _diag_temp(NULL),
          _bytes_per_accept( ( _os.opt_cor ? sizeof(int   ) * size_t(_os.n_var_corr) : size_t(0) ) +
                             ( _os.opt_cor ? sizeof(S) * size_t(_os.n_var_corr) : size_t(0) ) +
                             ( _os.opt_agp ? sizeof(S) * size_t(_os.n_agp_var ) : size_t(0) ) +
                             ( _os.opt_orb ? sizeof(S) * size_t(_os.n_orb_var ) : size_t(0) ) +
                             ( _os.opt_par ? sizeof(S) * size_t(_os.n_par_var ) : size_t(0) )   ),
          _blocker(srpacc->_block_size, srpacc->_diom, srpacc->_use_disk)
      {

        // raise an error if a single configuration's derivatives won't fit in a block
        if ( _blocker.block_size() < _bytes_per_accept )
          throw pcps::Exception("derivatives for a single configuration will not fit in a block in pcps::StochReconThreadAccum constructor");

        // initialize arrays (the conjugate gradient residual vector and the diagonal workspace
        // are only needed by thread 0 and/or when diagonal preconditioning is on)
        const S z = pcps::zero<S>();
        _counts        = arena.allocate_array<double>( size_t(this->_pacc->sample_length()) , 0);
        _y_vec         = arena.allocate_array<S>( size_t(_os.dim)                      , z);
        if (this->_tid == 0 || _srpacc->_diag_overlap_precon)
          _r_vec     = arena.allocate_array<S>( size_t(_os.dim)                      , z);
        if (_srpacc->_diag_overlap_precon)
          _diag_temp = arena.allocate_array<S>( size_t(_os.n_cor_var)                , z);

        // initialize data
        this->reset();

      }

//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      // destructor
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      ~StochReconThreadAccum() {
//
//        // destroy mutexes and condition variables
//        pthread_mutex_destroy(_mutex_ptr_a);
//        pthread_mutex_destroy(_mutex_ptr_b);
//        pthread_cond_destroy(_cond_ptr_a);
//        pthread_cond_destroy(_cond_ptr_b);
//
//      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to reset the accumulator
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void reset() {
        _n_accept = 0;
        _block_num = 0;
        _n_blocks = 0;
        _blocker.load_working_block();
        pcps::xscal(_os.dim,       pcps::zero<S>(), _y_vec,     1);
        if (this->_tid == 0 || _srpacc->_diag_overlap_precon)
          pcps::xscal(_os.dim,       pcps::zero<S>(), _r_vec,     1);
        if (_srpacc->_diag_overlap_precon)
          pcps::xscal(_os.n_cor_var, pcps::zero<S>(), _diag_temp, 1);
      }

      // Disk based pseudocode for computing and storing derivatives
      //
      //     Set both flags true.
      //     For accept:
      //       Write to A.
      //       If A full:
      //         Send A to disk.
      //         Wait for B to be available.
      //         Swap A and B.
      //     Send A to disk.
      //     Wait for A to be available.
      //     Wait for B to be available.

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to accumulate data needed for stochastic reconfiguration
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void accumulate(const long int iter, 
                      const long int count,
                      const bool accept,
                      const bool finished,
                      const S coeff_ratio,
                      const int * const new_config,
                      const int * const old_config,
                      pcps::CoeffValueFunc<S, PART, REF> & cvf,
                      pcps::ThreadAccum<S, PART, REF> ** accumulators,
                      const int n_accumulators) {

        if (accept || finished) {

          // get local energy
          S local_energy;
          {
            int i;
            for (i = 0; i < n_accumulators; i++)
              if (typeid(*accumulators[i]) == typeid(pcps::EnergyThreadAccum<S, PART, REF>)) {
                local_energy = ((pcps::EnergyThreadAccum<S, PART, REF> *)accumulators[i])->local_energy();
                break;
              }
            if (i == n_accumulators)
              throw pcps::Exception("error in pcps::StochReconThreadAccum::accumulate.  Could not find an EnergyThreadAccum accumulator.");
          }

          // if the blocker is out of space, store the current block and get a new one
          if ( _blocker.remaining() < _bytes_per_accept )
            _blocker.store_block( this->get_block_file_name(_block_num++) );

          // get pointers to where we will store this configuration's derivative ratio information
          int    * cor_der_ind; if (_os.opt_cor) cor_der_ind = _blocker.get<int   >(_os.n_var_corr);
          S * cor_der_val; if (_os.opt_cor) cor_der_val = _blocker.get<S>(_os.n_var_corr);
          S * agp_der_val; if (_os.opt_agp) agp_der_val = _blocker.get<S>(_os.n_agp_var );
          S * orb_der_val; if (_os.opt_orb) orb_der_val = _blocker.get<S>(_os.n_orb_var );
          S * par_der_val; if (_os.opt_par) par_der_val = _blocker.get<S>(_os.n_par_var );

          // get derivative ratios in compact form
          cvf.get_compact_derivatives(old_config, cor_der_ind, cor_der_val, agp_der_val, orb_der_val, par_der_val);

          // record the nuber of times this configuration was visited
          _counts[_n_accept] = double(count);

          // add this configuration's contribution to the b vector ( e.g. A x = b, we are using _y_vec to hold b for now)
          assert( !( _os.opt_agp && _os.opt_par ) );
          assert( !( _os.opt_orb && _os.opt_par ) );
          {

            // wavefunction without derivative
            _y_vec[ 0 ] += double(count) * local_energy;

            // correlators
            if (_os.opt_cor) {
              for (int c = 0; c < _os.n_var_corr; c++)
                _y_vec[ 1 + _srpacc->_corr_offsets[c] + cor_der_ind[c] ] += double(count) * local_energy * pcps::conj(cor_der_val[c]);
            }

            // agp weights
            if (_os.opt_agp) {
              for (int i = 0; i < _os.n_agp_var; i++)
                _y_vec[ 1 + _os.n_cor_var + i ] += double(count) * local_energy * pcps::conj(agp_der_val[i]);
            }

            // orbital coefficients
            if (_os.opt_orb) {
              for (int i = 0; i < _os.n_orb_var; i++)
                _y_vec[ 1 + _os.n_cor_var + _os.n_agp_var + i ] += double(count) * local_energy * pcps::conj(orb_der_val[i]);
            }

            // pairing matrix
            if (_os.opt_par) {
              for (int i = 0; i < _os.n_par_var; i++)
                _y_vec[ 1 + _os.n_cor_var + i ] += double(count) * local_energy * pcps::conj(par_der_val[i]);
            }

          }

          // add this configuration's contribution to the overlap matrix diagonal (stored in _r_vec for now)
          if (_srpacc->_diag_overlap_precon) {

            // wavefunction without derivative
            _r_vec[ 0 ] += double(count);

            // correlators
            if (_os.opt_cor) {
              // add contributions together BEFORE taking the square (this is critical in the case of translationally invariant correlators)
              for (int c = 0; c < _os.n_var_corr; c++)
                _diag_temp[_srpacc->_corr_offsets[c] + cor_der_ind[c]] += cor_der_val[c];
              // add the square of each derivative to the diagonal element, taking care not to add the same term twice
              for (int c = 0; c < _os.n_var_corr; c++) {
                _r_vec[ 1 + _srpacc->_corr_offsets[c] + cor_der_ind[c] ] +=
                  double(count) * pcps::square_norm(_diag_temp[_srpacc->_corr_offsets[c] + cor_der_ind[c]]);
                _diag_temp[_srpacc->_corr_offsets[c] + cor_der_ind[c]] = pcps::zero<S>();
              }
            }

            // agp weights
            if (_os.opt_agp) {
              for (int i = 0; i < _os.n_agp_var; i++)
                _r_vec[ 1 + _os.n_cor_var + i ] += double(count) * pcps::square_norm(agp_der_val[i]);
            }

            // orbital coefficients
            if (_os.opt_orb) {
              for (int i = 0; i < _os.n_orb_var; i++)
                _r_vec[ 1 + _os.n_cor_var + _os.n_agp_var + i ] += double(count) * pcps::square_norm(orb_der_val[i]);
            }

            // pairing matrix
            if (_os.opt_par) {
              for (int i = 0; i < _os.n_par_var; i++)
                _r_vec[ 1 + _os.n_cor_var + i ] += double(count) * pcps::square_norm(par_der_val[i]);
            }

          }

          // increment the number of visited configurations
          _n_accept++;

//          // if the disk block is full or if we are finished, write the block to disk
//          if ( ( _os.opt_orb || _os.opt_par ) && ( size_t(_n_accept) % _disk_max_accept == 0 || finished ) ) {
//
//            // determine which disk block we are working with
//            const size_t bi = ( _n_accept - 1 ) / _disk_max_accept;
//
//            // get the number of configurations whose derivatives will be written to disk
//            const size_t nconf = size_t(_n_accept-1) % _disk_max_accept + 1;
//
//            // if we are not writing to disk, store the data in memory
//            if (!_srpacc->_use_disk) {
//
//              // allocate a new block if the old block is too small
//              if ( _block_lengths[bi] < nconf ) {
//                _blocks[bi] = boost::shared_array<char>();                                                  // deallocate
//                _blocks[bi] = pcps::allocate_shared_array<char>( nconf * _srpacc->_disk_bytes_per_accept ); // reallocate
//              }
//
//              // copy the derivative data to the storage block
//              std::memcpy(_blocks[bi].get(), _disk_block_a, nconf * _srpacc->_disk_bytes_per_accept);
//
//            // otherwise write to disk
//            } else {
//
//              // get the file name to write the block to
//              this->get_block_file_name(bi, _fname_ptr_a);
//
//              // shedule the block to be written to disk
//              _srpacc->_diom.schedule_task('w', nconf * _srpacc->_disk_bytes_per_accept, (char *)_disk_block_a,
//                                           _flag_ptr_a, _mutex_ptr_a, _cond_ptr_a, _fname_ptr_a);
//
//              // wait for the previous write to finish so we can use the other disk block
//              this->wait_for_disk_block(_flag_ptr_b, _mutex_ptr_b, _cond_ptr_b, _fname_ptr_b);
//
//              // once the previous write is finished, switch to the other disk block
//              // this way we can keep working while the most recently finished disk block is being written to disk
//              this->swap_disk_pointers();
//
//            }
//
//            // remember how many configurations were in the block
//            _block_lengths[bi] = nconf;
//
//          }
//
        }

        if (finished) {

          // store the last block of derivative ratio data
          _blocker.store_block( this->get_block_file_name(_block_num++) );

          // set aside the current block to use later
          _blocker.save_working_block();

          // get the total number of samples
          const S total_samples = pcps::unity<S>() * this->_pacc->total_samples();

          pthread_mutex_lock(this->_pacc->mutex());

          // add this thread's b vector to the b vector for this process (which will be the initial residual for CG, stored in the r vector)
          pcps::xaxpy(_os.dim, 1.0 / total_samples, _y_vec, 1, &_srpacc->_process_b_vec.at(0), 1);

          // add this thread's overlap matrix diagonal to the total for this process
          if (_srpacc->_diag_overlap_precon)
            pcps::xaxpy(_os.dim, 1.0 / total_samples, _r_vec, 1, &_srpacc->_process_overlap_diag.at(0), 1);

          pthread_mutex_unlock(this->_pacc->mutex());

          // wait for other threads to add their contributions
          pthread_barrier_wait(this->_pacc->barrier());

          // reduce the r vector and the overlap diagonal onto the root process
          if (this->_tid == 0) {
            pcps::xcopy(_os.dim, &_srpacc->_process_b_vec.at(0), 1, _y_vec, 1);
            pcps::xscal(_os.dim, pcps::zero<S>(), &_srpacc->_process_b_vec.at(0), 1);
            pcps::reduce(MPI::COMM_WORLD, _y_vec, &_srpacc->_process_b_vec.at(0), _os.dim, MPI::SUM, 0);
            if (_srpacc->_diag_overlap_precon) {
              pcps::xcopy(_os.dim, &_srpacc->_process_overlap_diag.at(0), 1, _r_vec, 1);
              pcps::xscal(_os.dim, pcps::zero<S>(), &_srpacc->_process_overlap_diag.at(0), 1);
              pcps::reduce(MPI::COMM_WORLD, _r_vec, &_srpacc->_process_overlap_diag.at(0), _os.dim, MPI::SUM, 0);
            }
          }

//          // wait for any disk writing to complete
//          if (_srpacc->_use_disk) {
//            this->wait_for_disk_block(_flag_ptr_a, _mutex_ptr_a, _cond_ptr_a, _fname_ptr_a);
//            this->wait_for_disk_block(_flag_ptr_b, _mutex_ptr_b, _cond_ptr_b, _fname_ptr_b);
//          }

          // wait for thread 0 to communicate
          pthread_barrier_wait(this->_pacc->barrier());

        }

      }

      // Disk based pseudocode for loading derivatives
      //
      //     Both flags should be true.
      //     Load block 0 into A.
      //     Load block 1 into B.
      //     For CG iter:
      //       For accept:
      //         If done with A:
      //           Swap A and B.
      //           Load next block into B.
      //           Wait for A to load.
      //         Read from A.
      //       // after accept loop, block 0 will be in B
      //       Swap A and B.
      //       Load block 1 into B.
      //       Wait for A to load.

//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      // function to wait for a disk block to finish its i/o operation
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      void wait_for_disk_block(const bool * const flag, pthread_mutex_t * m, pthread_cond_t * c, const std::string * const fname) {
//        pthread_mutex_lock(m);
//        if (!*flag)
//          pthread_cond_wait(c, m);
//        if (!*flag)
//          throw pcps::Exception( (boost::format("block for file \"%s\" not ready in pcps::StochReconProcessAccum::wait_for_disk_block")
//                                    % *fname).str() );
//        pthread_mutex_unlock(m);
//      }
//
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      // function to swap the "a" and "b" disk block pointers
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      void swap_disk_pointers() {
//        std::swap(_disk_block_a, _disk_block_b);
//        std::swap(_mutex_ptr_a,  _mutex_ptr_b);
//        std::swap(_cond_ptr_a,   _cond_ptr_b);
//        std::swap(_flag_ptr_a,   _flag_ptr_b);
//        std::swap(_fname_ptr_a,  _fname_ptr_b);
//      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to set the file name for the given disk block
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      std::string get_block_file_name(const size_t block_num) {
        return ( boost::format("%spcps_sr_thread_%i_disk_block_%i.bin") % _srpacc->_workdir % this->_tid % block_num ).str();
      }

//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      // function to start loading the specified block of derivatives into the "b" block
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      void load_block_into_b(const size_t block_num) {
//
//        // set the file name
//        this->get_block_file_name(block_num, _fname_ptr_b);
//
//        // schedule the next block to be read from disk
//        _srpacc->_diom.schedule_task('r', _block_lengths[block_num] * _srpacc->_disk_bytes_per_accept, (char *)_disk_block_b,
//                                     _flag_ptr_b, _mutex_ptr_b, _cond_ptr_b, _fname_ptr_b);
//
//      }
//
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      // function to start loading the next block of derivatives from the disk
//      //////////////////////////////////////////////////////////////////////////////////////////////////////
//      void load_next_disk_block() {
//
//        // swap the "a" and "b" pointers
//        this->swap_disk_pointers();
//
//        // schedule the next block to be read from disk
//        this->load_block_into_b(_next_block);
//
//        // set the block number of the next block to load
//        _next_block = ( _next_block + 1 ) % _n_disk_blocks;
//
//        // wait for the "a" block to be ready
//        this->wait_for_disk_block(_flag_ptr_a, _mutex_ptr_a, _cond_ptr_a, _fname_ptr_a);
//
//      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to solve the linear equation for the update to the variables
      //
      // Re-reads the derivative ratio blocks stored during accumulate() via the blocker's
      // prefetch/load double buffering, then drives the conjugate gradient solve, in which this
      // object supplies the matrix-vector products (it inherits from MatrixActor, see operate_by_A
      // and operate_by_M below).
      //
      // NOTE(review): assumes _block_num >= 1 on entry (accumulate's "finished" branch always
      // stores at least one block); otherwise "1 % _n_blocks" below would divide by zero -- confirm.
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void compute_update() {

//        // load derivatives from disk if necessary
//        if ( _srpacc->_use_disk && ( _os.opt_orb || _os.opt_par ) ) {
//
//          // get the number of disk blocks
//          _n_disk_blocks = size_t(_n_accept-1) / _disk_max_accept + 1;
//          assert( _n_disk_blocks > 0 );
//
//          // schedule the first block to be loaded from disk
//          this->load_block_into_b(0);
//
//          // wait for other threads to schedule their first block load
//          pthread_barrier_wait(this->_pacc->barrier());
//
//          // if there is only one disk block, point to it with the "a" pointers and wait for it to load
//          if ( _n_disk_blocks == 1 ) {
//
//            // change to "a" pointers for the block
//            this->swap_disk_pointers();
//
//            // wait for first block to be read from disk
//            this->wait_for_disk_block(_flag_ptr_a, _mutex_ptr_a, _cond_ptr_a, _fname_ptr_a);
//
//          // if there is more than one block, begin loading the second block
//          } else {
//            _next_block = 1;
//            this->load_next_disk_block();
//          }
//
//        }

        // load the first block of derivative ratio data and start fetching the second block
        // (when there is only one block, 1 % _n_blocks wraps back to block 0)
        _n_blocks = _block_num;
        _blocker.prefetch( this->get_block_file_name(0) );
        _blocker.load_from_prefetch( this->get_block_file_name(0) );
        _block_num = 1 % _n_blocks;
        _blocker.prefetch( this->get_block_file_name(_block_num) );

        // solve the linear equation via the conjugate gradient method
        pcps::conjugate_gradient(_os.dim,
                                 _srpacc->_max_cg_iter,
                                 &_srpacc->_process_x_vec.at(0), 
                                 &_srpacc->_process_b_vec.at(0), 
                                 &_srpacc->_process_y_vec.at(0), 
                                 &_srpacc->_process_d_vec.at(0), 
                                 _r_vec, 
                                 this, // this class is a child of the MatrixActor class
                                 this->_tid);

//        // if derivatives were being loaded from disk, wait for all disk reading to finish
//        if ( _srpacc->_use_disk && ( _os.opt_orb || _os.opt_par ) ) {
//          this->wait_for_disk_block(_flag_ptr_a, _mutex_ptr_a, _cond_ptr_a, _fname_ptr_a);
//          this->wait_for_disk_block(_flag_ptr_b, _mutex_ptr_b, _cond_ptr_b, _fname_ptr_b);
//        }

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to perform y = A d  (in this case A is the overlap matrix) (inherited from MatrixActor)
      //
      //   n       dimension of the vectors (must equal _os.dim)
      //   d       the trial/search direction vector (broadcast from the root process inside this call)
      //   y       on exit the action of the (diagonally shifted) overlap matrix on d, reduced onto
      //           the root process by thread 0
      //   myrank  MPI rank of this process
      //   tid     thread id within this process
      //
      // All threads of all processes must call this together: it synchronizes on the process
      // barrier and mutex, and thread 0 performs the MPI communication. Derivative ratio data is
      // streamed back in via the blocker's prefetch/load double buffering in the same block order
      // it was written by accumulate().
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void operate_by_A(const int n, S * const d, S * const y, const int myrank, const int tid) {

        assert(n == _os.dim);

//        // save the location of disk block a
//        S * const disk_block_a_save = _disk_block_a;
//
//        // if reading from memory, get the first block
//        if (!_srpacc->_use_disk) {
//          _next_block = 0;
//          _disk_block_a = (S *)_blocks[_next_block++].get();
//        }

        // wait for all threads to enter the function
        pthread_barrier_wait(this->_pacc->barrier());

        // broadcast the trial vector to all processes and zero the result vector
        // (the broadcast sends the raw bytes of d, hence n * sizeof(S) elements of MPI::CHAR)
        if (tid == 0) {
          MPI::COMM_WORLD.Bcast(d, n * sizeof(S), MPI::CHAR, 0);
          pcps::xscal(n, pcps::zero<S>(), y, 1);
        }

        // wait for thread 0 to finish broadcast
        pthread_barrier_wait(this->_pacc->barrier());

        // compute action of each visited configuration's overlap matrix on the search direction
        // (each configuration contributes count * (D^dag D) d, accumulated into this thread's _y_vec)
        pcps::xscal(n, pcps::zero<S>(), _y_vec, 1);
        for (int ai = 0; ai < _n_accept; ai++) {

//          // if we are ready for the next block of derivatives, load it from disk
//          if ( ( _os.opt_orb || _os.opt_par ) && _n_disk_blocks > 1 && ai % _disk_max_accept == 0 && ai != 0 ) {
//
//            // if reading from disk
//            if (_srpacc->_use_disk) {
//
//              if (_next_block == 1)
//                throw pcps::Exception("next block should never be block one during ai loop of pcps::StochReconThreadAccum::operate_by_A.");
//              this->load_next_disk_block();
//
//            // if reading from memory
//            } else {
//              _disk_block_a = (S *)_blocks[_next_block++].get();
//            }
//
//          }

          // if we have reached the end of the block, get the next block and start fetching the block after that
          if ( _blocker.remaining() < _bytes_per_accept ) {
            _blocker.load_from_prefetch( this->get_block_file_name(_block_num) );
            _block_num = ( _block_num + 1 ) % _n_blocks;
            _blocker.prefetch( this->get_block_file_name(_block_num) );
          }

          // get pointers to this configuration's derivative ratio information
          // (reads must occur in the same order and sizes as the writes in accumulate())
          const int    * cor_der_ind; if (_os.opt_cor) cor_der_ind = _blocker.get<int   >(_os.n_var_corr);
          const S * cor_der_val; if (_os.opt_cor) cor_der_val = _blocker.get<S>(_os.n_var_corr);
          const S * agp_der_val; if (_os.opt_agp) agp_der_val = _blocker.get<S>(_os.n_agp_var );
          const S * orb_der_val; if (_os.opt_orb) orb_der_val = _blocker.get<S>(_os.n_orb_var );
          const S * par_der_val; if (_os.opt_par) par_der_val = _blocker.get<S>(_os.n_par_var );

          // wavefunction without derivative
          S total = d[0];

          // correlators
          if (_os.opt_cor) {
            for (int c = 0; c < _os.n_var_corr; c++)
              total += d[ 1 + _srpacc->_corr_offsets[c] + cor_der_ind[c] ] * cor_der_val[c];
          }

          // agp weights
          if (_os.opt_agp)
            total += pcps::xdot(_os.n_agp_var, agp_der_val, 1, d + 1 + _os.n_cor_var, 1);

          // orbital coefficients
          if (_os.opt_orb)
            total += pcps::xdot(_os.n_orb_var, orb_der_val, 1, d + 1 + _os.n_cor_var + _os.n_agp_var, 1);

          // pairing matrix
          if (_os.opt_par)
            total += pcps::xdot(_os.n_par_var, par_der_val, 1, d + 1 + _os.n_cor_var, 1);

          // account for the number of times the configuration was sampled
          total *= _counts[ai];

          // wavefunction without derivative
          _y_vec[0] += total;

          // correlators
          if (_os.opt_cor) {
            for (int c = 0; c < _os.n_var_corr; c++)
              _y_vec[ 1 + _srpacc->_corr_offsets[c] + cor_der_ind[c] ] += total * pcps::conj(cor_der_val[c]);
          }

          // agp weights
          if (_os.opt_agp)
            for (int i = 0; i < _os.n_agp_var; i++)
              _y_vec[ 1 + _os.n_cor_var + i ] += total * pcps::conj(agp_der_val[i]);

          // orbital coefficients
          if (_os.opt_orb)
            for (int i = 0; i < _os.n_orb_var; i++)
              _y_vec[ 1 + _os.n_cor_var + _os.n_agp_var + i ] += total * pcps::conj(orb_der_val[i]);

          // pairing matrix
          if (_os.opt_par)
            for (int i = 0; i < _os.n_par_var; i++)
              _y_vec[ 1 + _os.n_cor_var + i ] += total * pcps::conj(par_der_val[i]);

        }

        // get the next block and start fetching the block after that, so the cycle is
        // back at block 1 and the NEXT call to this function starts cleanly from block 0
        _blocker.load_from_prefetch( this->get_block_file_name(_block_num) );
        _block_num = ( _block_num + 1 ) % _n_blocks;
        _blocker.prefetch( this->get_block_file_name(_block_num) );
        if ( _n_blocks > 1 && _block_num != 1 )
          throw pcps::Exception("next block must be block one after the ai loop of pcps::StochReconThreadAccum::operate_by_A.");

//        // load the next block of derivatives from disk
//        if ( _srpacc->_use_disk && ( _os.opt_orb || _os.opt_par ) && _n_disk_blocks > 1 ) {
//          if (_next_block != 1)
//            throw pcps::Exception("next block must be block one after the ai loop of pcps::StochReconThreadAccum::operate_by_A.");
//          this->load_next_disk_block();
//        }

        // combine actions from all threads for this process (normalized by the total sample count)
        pthread_mutex_lock(this->_pacc->mutex());
        pcps::xaxpy(n, pcps::unity<S>() / this->_pacc->total_samples(), _y_vec, 1, y, 1);
        pthread_mutex_unlock(this->_pacc->mutex());

        // wait for all threads to add their actions
        pthread_barrier_wait(this->_pacc->barrier());

        // reduce actions from all processes on the root process
        // (copy the process total aside, zero it, then MPI-reduce back into y)
        if (tid == 0) {
          pcps::xcopy(n, y, 1, _y_vec, 1);
          pcps::xscal(n, pcps::zero<S>(), y, 1);
          pcps::reduce(MPI::COMM_WORLD, _y_vec, y, n, MPI::SUM, 0);
        }

        // add diagonal shift on root thread/process (y += shift * d, i.e. A is the shifted overlap matrix)
        if (myrank == 0 && tid == 0)
          pcps::xaxpy(n, pcps::unity<S>() * _srpacc->_shift, d, 1, y, 1);

//        // print 
//        if (myrank == 0 && tid == 0)
//          std::cout << boost::format("multiplied by the overlap matrix") << std::endl;

        // wait for thread 0 to finish communication
        pthread_barrier_wait(this->_pacc->barrier());

//        // if we were not loading from disk, restore the disk block a pointer
//        if (!_srpacc->_use_disk)
//          _disk_block_a = disk_block_a_save;

      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to perform y = M d, where M is our preconditioning operator (inherited from MatrixActor)
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      void operate_by_M(const int n, S * const d, S * const y, const int myrank, const int tid) {
        assert(n == _os.dim);
        if (myrank == 0 && tid == 0) {
          pcps::xcopy(n, d, 1, y, 1);
          if (_srpacc->_diag_overlap_precon)
            for (int i = 0; i < n; i++)
              y[i] /= std::sqrt(_srpacc->_process_overlap_diag[i] + pcps::unity<S>() * _srpacc->_shift);
        }
      }

      //////////////////////////////////////////////////////////////////////////////////////////////////////
      // function to check if the conjugate gradient iterations have converged (inherited from MatrixActor)
      //////////////////////////////////////////////////////////////////////////////////////////////////////
      bool converged(const double residual, const int myrank, const int tid) {
        const bool is_root_thread = ( tid == 0 );
        // the root thread of the root process decides convergence by comparing the residual norm
        // against the threshold
        if (is_root_thread && myrank == 0) {
          assert( residual > 0.0 );
          assert( _srpacc->_thresh > 0.0 );
          _srpacc->_cg_converged = ( residual < _srpacc->_thresh );
        }
        // broadcast the decision to every process, then let all threads see it before returning
        if (is_root_thread)
          MPI::COMM_WORLD.Bcast(&_srpacc->_cg_converged, 1, MPI::BOOL, 0);
        pthread_barrier_wait(this->_pacc->barrier());
        return _srpacc->_cg_converged;
      }

  };

  //------------------------------------------------------------------------------------------
  // pcps::StochReconProcessAccum::create_thread_accum -- Function to create a thread accumulator
  //                                                      from a process accumulator.
  //------------------------------------------------------------------------------------------

  template <class S, class PART, class REF>
  pcps::ThreadAccum<S, PART, REF> * pcps::StochReconThreadAccum<S, PART, REF>::create_thread_accum(const int tid, pcps::Arena & arena) {
    // placement-new the thread accumulator into the caller-supplied arena; the arena provides the
    // storage (presumably the arena, not the caller, is responsible for reclaiming it -- verify)
    return new(arena) pcps::StochReconThreadAccum<S, PART, REF>(this, tid, arena);
  }

} // end namespace pcps

#endif
