#ifndef _FirstPhase_H_ // include guard
#define _FirstPhase_H_

/******************************************************************************

Description:
  This class handles the first phase of the algorithm, which is itself 
  divided into two parts. In the first part, items and their counts are 
  found; these local f_lists are then reduced two by two. 

  The second part of the first phase starts when nodes use this global 
  f_list to create local fp-trees. These local fp-trees are similarly 
  reduced two by two, and thus the global fp-trees are constructed. This 
  ends the first phase.

Notes:
  Different binary executable code can be produced from this class depending
  on the different compiler macros defined. These macros allow producing:
    *  serial version without Boost::MPI overhead. (don't define USE_MPI macro)
    *  parallel version that uses MPI reduce operations. (define USE_MPI macro)
    *  parallel version not using MPI reduce but doing plain send/recv ops.
       (define USE_MPI macro AND also SIMPLE_REDUCE macro)
  
  Different executables come with different names. Use cpSerial for the serial 
  implementation without the overhead of MPI, use cpMpiReduce for the version
  which uses MPI_Reduce function in reducing potentially large fp-trees.
  The cpSimpleReduce executable also uses MPI in the background, but instead 
  of the native reduce operation it does plain send & recv. cpSimpleReduce 
  can only be used with 2 processes at the moment.

Author: Tayfun Sen
Website: http://blog.tayfunsen.com
 
 *****************************************************************************/


// Standard libraries.
#include <iostream>
#include <fstream>
#include <sstream>
#include <ext/hash_map>
#include <algorithm>
#include <time.h>
// Boost libraries for serializing hash_map's.
#include <boost/tokenizer.hpp>
#include <boost/archive/tmpdir.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/serialization/export.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/utility.hpp>
#include <boost/serialization/hash_map.hpp>
#include <boost/serialization/map.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/string.hpp>
#include <boost/mpi.hpp>
// Custom libraries and headers.
#include "shared.hpp"
#include "Globals.hpp"
#include "Functors.hpp"
#include "Tree.hpp"


using namespace std;
using namespace boost;
using namespace __gnu_cxx;

// Tell boost to never track Tree objects. Thus, we can serialize it without 
// making a const_cast.
// See: http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/traits.html
// BOOST_CLASS_TRACKING(Tree, track_never);

/** 
 *  @brief Define this macro to avoid expensive MPI_Pack/Unpack calls on 
 *  homogeneous machines.
*/
#define BOOST_MPI_HOMOGENEOUS

// Drives the first phase: counting item frequencies over this node's slice
// of the transaction database, then building and merging local fp-trees.
// Member functions other than the constructor are defined in the .cpp file.
class FirstPhase {
    public:
            // Computes this processor's partition of the transaction
            // database from the globals. Ranks 0..size-2 each take an
            // equal-sized slice; the last rank (size - 1) takes whatever
            // remains, so every transaction is covered exactly once.
            FirstPhase() {
                // NOTE(review): assumes Globals::size > 0 and that the
                // globals are initialized before construction — verify
                // against the caller.
                const int slice = Globals::totalTransactions / Globals::size;
                // Transactions are numbered starting from 1.
                startXact = slice * Globals::rank + 1;
                if (Globals::rank == Globals::size - 1)
                    // Last rank: everything left after the equal slices.
                    partSize = Globals::totalTransactions - 
                        slice * (Globals::size - 1);
                else
                    partSize = slice;
                // NOTE(review): raw new with no destructor in this header;
                // presumably released by clear() in the .cpp — confirm, as
                // copying a FirstPhase would double-free (rule of three).
                transactions = new vector< vector<string> * >;
            }

            // Read/write access to the globally merged fp-tree.
            Tree & getFpTree() {
                return mergedTree;
            }

            int run();

            // Kinda like a destructor but frees only the space we don't need.
            void clear();

    private:
            // Vector of vector of strings to keep the local database.
            vector< vector<string> * > * transactions;
            // Hash for keeping the items and their counts.
            hash_map<string, int> itemCounts, reducedCounts;
#ifdef USE_MPI
            // MPI Communicator.
            mpi::communicator world;
#endif
            // start line is the transaction number it will be starting at.
            // partition size is the number of transactions it will be 
            // processing.
            int startXact, partSize;
            Tree fpTree, mergedTree;

            /************************************
             * Functions definitions start now.
             * *********************************/
            // first part of the first phase is finding local counts.
            int findLocalCounts();
            void buildFpTree();

            void reduceAndBcastCounts();
            void allReduceCounts();
            void reduceAndBcastTrees();

            void pruneCounts();
            void pruneTransactions();
            
            void sortLocal();
            
            void printTransactions();
            void printCounts();
};

#endif
