/*
This file is part of GraphLab.

GraphLab is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as 
published by the Free Software Foundation, either version 3 of 
the License, or (at your option) any later version.

GraphLab is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public 
License along with GraphLab.  If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef GRAPHLAB_MR_DISK_GRAPH_CONSTRUCTION_HPP
#define GRAPHLAB_MR_DISK_GRAPH_CONSTRUCTION_HPP
#include <boost/bind.hpp>
#include <graphlab/graph/graph.hpp>
#include <graphlab/util/stl_util.hpp>
#include <graphlab/parallel/pthread_tools.hpp>
#include <graphlab/graph/disk_graph.hpp>
#include <graphlab/rpc/dc.hpp>
#include <graphlab/graph/mr_disk_graph_construction_impl.hpp>

namespace graphlab {

/**
  * Standard interface for full parallel/distributed construction of a 
  * disk_graph. User first subclasses the igraph_constructor, implementing
  * the begin(), iterate() and vertex_to_atomid() methods. 
  * See \ref mr_disk_graph_construction for details.
  * 
  * The subclass must be copy-constructible.
  * 
  */
template <typename VertexData, typename EdgeData>
class igraph_constructor {
 public:
  
  enum iterate_return_type {
    Vertex,
    Edge,
    NoMoreData = -1,
  };
  /// constructor
  igraph_constructor() { };
  
  /// destructor
  virtual ~igraph_constructor() { };
  
  /**
   * 'max' (possibly distributed instances) of the user subclass is created 
   * using the copy constructor. (max is defined by the arguments 
   * into \ref mr_disk_graph_construction )
   * begin() is then called on each instance with a unique 'i' from 0 to 
   * max - 1.
   */
  virtual void begin(size_t i, size_t max) = 0;
  
  /**
   * after begin() is called, iterate() is called repeatedly.
   * Each call to iterate() must return either a vertex or an edge through
   * its arguments which are passed by reference.
   *  \param irt 
   *  \param vtx If return_value == Vertex, this is the vertex ID returned. This parameter
   *             is ignored if return_value == Edge.
   *  \param vdata If return_value == Vertex, the vertexdata to be created on vertex 'vtx'
   *              is returned here.
   *  \param color If return_value == Vertex, this should contain the vertex color  of 'vtx'
   *  \param edge If return_value == Edge, this is the edge to be inserted. This parameter
   *             is ignored if return_value == Vertex.
   *  \param edata if return_value == Edge, the edgedata to be created on the edge 
   *              'edge' is returned here.
   * \return if return value is Vertex or Edge, the actual data will be read
   *         from the arguments. If return value is NoMoreData, no data is read,
   *         and the construction loop is terminated.
   */
  virtual iterate_return_type iterate(vertex_id_t& vtx, 
                                      VertexData& vdata,
                                      uint32_t& color,
                                      std::pair<vertex_id_t, vertex_id_t>& edge, 
                                      EdgeData & edata) = 0;
                       
  /**
   * This function provides the mapping from vertex to atom ID.
   * The behavior of this function must be consistent across all (distributed)
   * instances.
   */
  virtual uint16_t vertex_to_atomid(vertex_id_t vtx, uint16_t numatoms) = 0;
  
  /**
   * Used by mr_disk_graph_construction. Creates a section of the disk graph
   * using the information generated by this class.
   */
  void mr_disk_graph_construction_map(disk_graph<VertexData, EdgeData> &dg,
                                      size_t i,
                                      size_t max) {
    begin(i, max);
    
    iterate_return_type irt;
    vertex_id_t vtx;
    std::pair<vertex_id_t, vertex_id_t> edge;
    VertexData vdata;
    EdgeData edata;
    uint16_t numatoms = (uint16_t) dg.num_atoms();
    // loop over iterate until it returns an NoMoreData
    uint32_t color = 0;
    while((irt = iterate(vtx, vdata, color, edge, edata)) != NoMoreData){   
      if (irt == Vertex) {
        uint16_t location = vertex_to_atomid(vtx, numatoms);
        ASSERT_LT(location, numatoms);
        dg.add_vertex_unsafe(vtx, vdata, location);
        dg.set_color(vtx, color);
      }
      else if (irt == Edge) {
        uint16_t locationsrc = vertex_to_atomid(edge.first, numatoms);
        uint16_t locationdest = vertex_to_atomid(edge.second, numatoms);
        ASSERT_LT(locationsrc, numatoms);
        ASSERT_LT(locationdest, numatoms);
        dg.add_edge_explicit(edge.first, locationsrc,
                             edge.second, locationdest,
                             edata);
      }
      else {
        ASSERT_MSG(false, "Graph Constructor returned invalid value");
      }
      color = 0;
    }
  }
};

/**
  * \tparam GraphConstructor A subclass of igraph_constructor.
  * 
  * Each process must construct a single instance of the graph constructor.
  * This constructor is then replicated 'max_per_node' times on each machine
  * using the GraphConstructor's copy constructor. In total, 
  * max = max_per_node * dc.numprocs() instances are constructed. 
  * begin() on each instance is called using this value.
  * 
  * \note If run in the distributed setting, all processes must have access to
  * a common distributed file system (such as NFS) 
  * 
  * This function must be called with the same arguments across all the machines.
  * 
  * The constraint is that every edge and every vertex must be added at most 
  * once across all (distributed) GraphConstructors. Arbitrary joining 
  * may result if the same edge/vertex is added more than once.
  * 
  * \param dc The distributed control object
  * \param gc An instance of the GraphConstructor. Must be copy constructible
  * \param max_per_node Number of times 'gc' will be replicated on each machine
  * \param outputbasename The output atom files will be stored as outputbasename.0
  *                       outputbasename.1, etc. With atom index in outputbasename.idx
  *                       In addition, a series of temporary files named 
  *                       [outputbasename]_t... will be created. They will be
  *                       erased at the end of the execution of mr_disk_graph_construction.
  * \param numatoms The number of atoms to create.
  */
template <typename GraphConstructor, typename VertexData, typename EdgeData>
void mr_disk_graph_construction(distributed_control &dc,
                                GraphConstructor &gc, 
                                size_t max_per_node,
                                std::string outputbasename,
                                size_t numatoms) {
  // lets all the machines here first.
  dc.full_barrier();
  if (dc.procid() == 0) {
    logstream(LOG_INFO) << "Mapping over Graph Constructors..." << std::endl;
  }
  {
    // create the local disk graph
    // each machine saves to the same disk graph
    disk_graph<VertexData, EdgeData> dg(outputbasename + "_" + tostr(dc.procid()), numatoms);
    dg.clear();
    /******* Map Phase ***********/
    thread_group thrgrp;
    std::vector<GraphConstructor*> gcs(max_per_node);
    for (size_t i = 0;i < max_per_node; ++i) {
      gcs[i] = new GraphConstructor(gc);
      size_t gcid = dc.procid() * max_per_node + i;
      thrgrp.launch(
          boost::bind(
            &igraph_constructor<VertexData, EdgeData>::mr_disk_graph_construction_map,
            gcs[i],
            boost::ref(dg),
            gcid,
            max_per_node * dc.numprocs()));
    }
    thrgrp.join();
    for (size_t i = 0;i < max_per_node; ++i) {
      delete gcs[i];
    }
    dg.finalize();
  }
  dc.barrier();
  if (dc.procid() == 0) {
    logstream(LOG_INFO) << "Joining Atoms..." << std::endl;
  }
  std::map<size_t, mr_disk_graph_construction_impl::atom_properties> atomprops;
  // split the atoms among the machines
  for (size_t i = dc.procid(); i < numatoms; i += dc.numprocs()) {
    // build a vector of all the parallel atoms
    std::vector<std::string> atomfiles;
    for (size_t j = 0;j < dc.numprocs(); ++j) {
      atomfiles.push_back(outputbasename + "_" + tostr(j) + "." + tostr(i));
    }
    std::string finaloutput = outputbasename + "." + tostr(i);
    atomprops[i] = 
          mr_disk_graph_construction_impl::merge_parallel_disk_atom<VertexData, EdgeData>(atomfiles, finaloutput, i);
  }
  dc.barrier();
  
  // processor 0 joins and built the atom index
  // other processors just send
  if (dc.procid() > 0) {
    dc.send_to(0, atomprops);
  }
  else {
    // receive all the atom_to_adjacent_atoms
    for (procid_t i = 1;i < dc.numprocs(); ++i) {
      std::map<size_t, mr_disk_graph_construction_impl::atom_properties> temp;
      dc.recv_from(i, temp);
      // merge into atom_to_adjacent_atoms
      std::map<size_t, mr_disk_graph_construction_impl::atom_properties>::iterator iter = temp.begin();
      while(iter != temp.end()) {
        ASSERT_TRUE(atomprops.find(iter->first) == atomprops.end());
        atomprops[iter->first] = iter->second;
        ++iter;
      }  
    }
    // done! Now build the atom index
    ASSERT_EQ(atomprops.size(), numatoms);
    atom_index_file idxfile = mr_disk_graph_construction_impl::atom_index_from_properties(atomprops);
    idxfile.write_to_file(outputbasename+".idx");
  }
  dc.barrier();
  
};


}
#endif

