//********************************************************************************
//
// File :           ph02_quick_sort_cluster.cpp
//
// Description :    quick_sort arrays using cluster
//
// Dependencies:    mpi.h
//
// Author :         UlMonkey1987( WU Kan )
//
// Stu NO. :        2010212404
//
// Date :           2011-4-20
//
//********************************************************************************

#include "mpi.h"

#include <iostream>
#include <fstream>
#include <iomanip>

#include <cstdlib>
#include <algorithm>
#include <cmath>

#include <string>

using namespace std;

// bool readData( const string& strTestDataPath, long* pNum, unsigned long numN )
//
// Fill pNum with numN longs read from the binary file at strTestDataPath.
//
// Returns false when pNum is null, the file cannot be opened, or fewer than
// numN * sizeof(long) bytes could be read. (The previous version returned
// true unconditionally, silently accepting missing or truncated files.)
bool readData( const std::string& strTestDataPath, long* pNum, unsigned long numN )
{
    if ( 0 == pNum ) return false;

    std::ifstream fin( strTestDataPath.c_str(), std::ios::binary );

    if ( ! fin.is_open() ) return false;

    fin.read( reinterpret_cast< char* >( pNum ), numN * sizeof( long ) );

    // read() sets failbit on a short read; report that to the caller
    return ! fin.fail();
}
//

// bool isPowerBasedOn2( const int n )
//
// Returns true when n is a positive integral power of two (1, 2, 4, 8, ...).
bool isPowerBasedOn2( const int n )
{
    if ( n < 1 ) return false;

    // a power of two has exactly one bit set, so clearing the lowest set
    // bit via n & (n - 1) must leave zero
    return 0 == ( n & ( n - 1 ) );
}
//

// bool insideSort( long* pNum, const unsigned long startIndex, const unsigned long endIndex )
//
// Sort pNum[startIndex..endIndex] (inclusive) in ascending order.
//
// Returns false (without touching the buffer) for a null pointer or an
// empty/inverted range. main() calls this with endIndex = curNumN - 1,
// which wraps to ULONG_MAX when a process holds zero elements; the guard
// below handles that case explicitly instead of relying on unsigned
// wraparound in the pointer arithmetic.
bool insideSort( long* pNum, const unsigned long startIndex, const unsigned long endIndex )
{
    // endIndex + 1 wraps to 0 for endIndex == ULONG_MAX, so this also
    // rejects the "curNumN == 0" caller pattern
    if ( 0 == pNum || endIndex + 1 <= startIndex ) return false;

    std::sort( pNum + startIndex, pNum + endIndex + 1 );

    return true;
}
//

// bool partitionGroup( long* pNum, const long startIndex, const long endIndex, const long median, long& partitionIndex )
//
// Lomuto-style partition of pNum[startIndex..endIndex] around `median`:
// afterwards every element <= median occupies the front of the range and
// partitionIndex is the index of the last such element (startIndex - 1
// when none qualify). Returns false for an empty/inverted range.
bool partitionGroup( long* pNum, const long startIndex, const long endIndex, const long median, long& partitionIndex )
{
    partitionIndex = startIndex - 1;

    if ( startIndex > endIndex ) return false;

    for ( long scan = startIndex; scan <= endIndex; ++ scan )
    {
        if ( pNum[scan] > median ) continue;

        // grow the "<= median" prefix and pull the element into it
        ++ partitionIndex;
        std::swap( pNum[partitionIndex], pNum[scan] );
    }

    return true;
}
//

// long getMedian( long* pNum, const long startIndex, const long endIndex )
//
// Returns the element that would sit at the middle index of
// pNum[startIndex..endIndex] if the range were fully sorted. Partially
// reorders the range as a side effect (std::nth_element). Returns 0 for
// an empty/inverted range.
long getMedian( long* pNum, const long startIndex, const long endIndex )
{
    if ( startIndex > endIndex ) return 0;

    const unsigned long midRank = ( startIndex + endIndex ) / 2;

    // place the middle-ranked element at midRank without a full sort
    std::nth_element( pNum + startIndex, pNum + midRank, pNum + endIndex + 1 );

    return pNum[midRank];
}
//

// bool exchangePartitionedData( const unsigned long localProcID, const unsigned long localCommSize,
//                               const unsigned long globalProcID, const long partitionIndex,
//                               long*& pNum, int& curNumN )
//
// One hypercube-quicksort exchange round. Each process in the lower half of
// its sub-communicator keeps its "<= median" part ([0..partitionIndex]) and
// ships the "> median" part to its partner localCommSize/2 ranks above;
// each upper-half process does the converse. pNum is reallocated to hold the
// merged result and curNumN is updated to the new element count.
//
// FIX: pNum is now passed by reference-to-pointer. The previous signature
// took a plain `long*`, so the delete[]/new[] below only reassigned a local
// copy — the caller's buffer was freed and left dangling (use-after-free
// and later double-delete in main). The call site is source-compatible.
//
// Message protocol (tag 0 = count, tag 1 = payload). The lower half sends
// first and then receives; the upper half receives first and then sends —
// preserve this ordering, it is what prevents the pairwise deadlock.
bool exchangePartitionedData( const unsigned long localProcID, const unsigned long localCommSize,
                              const unsigned long globalProcID, const long partitionIndex,
                              long*& pNum, int& curNumN )
{
    if ( localProcID < localCommSize / 2  )
    {
        // global rank of the exchange partner in the upper half
        int targetCommId = globalProcID + localCommSize / 2;

        // announce how many "> median" elements will be sent
        long sendDataCount = ( ( partitionIndex > ( long ) ( curNumN - 1 ) ) ? ( 0 ) : ( curNumN - partitionIndex - 1 ) );
        MPI_Send( &sendDataCount, 1, MPI_LONG, targetCommId, 0, MPI_COMM_WORLD );

        // split local data at partitionIndex: lower part stays, upper part goes
        unsigned long lowerCountRemain = partitionIndex + 1;
        unsigned long upperCountRemain = curNumN - partitionIndex - 1;

        long* pNumTempLowerRemain = new long[lowerCountRemain];
        long* pNumTempUpperRemain = new long[upperCountRemain];

        for ( long i = 0; i <= partitionIndex; i ++ )
            pNumTempLowerRemain[i] = pNum[i];

        for ( long i = partitionIndex + 1; i < curNumN; i ++ )
            pNumTempUpperRemain[i-partitionIndex-1] = pNum[i];

        if ( 0 != sendDataCount )
            MPI_Send( pNumTempUpperRemain, upperCountRemain, MPI_LONG, targetCommId, 1, MPI_COMM_WORLD );

        // receive the partner's "<= median" part
        MPI_Status status;
        long receiveCount = 0;
        MPI_Recv( &receiveCount, 1, MPI_LONG, targetCommId, 0, MPI_COMM_WORLD, &status );

        long* pNumTempLowerNew = new long[receiveCount];

        if ( 0 != receiveCount )
        {
            MPI_Recv( pNumTempLowerNew, receiveCount, MPI_LONG, targetCommId, 1, MPI_COMM_WORLD, &status );
        }

        // rebuild the local buffer: kept lower part + received lower part
        delete []pNum; pNum = 0;

        curNumN = lowerCountRemain + receiveCount;
        pNum = new long[curNumN];

        for ( unsigned long i = 0; i < lowerCountRemain; i ++ )
            pNum[i] = pNumTempLowerRemain[i];

        for ( long i = 0; i < receiveCount; i ++ )
            pNum[lowerCountRemain+i] = pNumTempLowerNew[i];

        delete []pNumTempLowerRemain; pNumTempLowerRemain = 0;
        delete []pNumTempUpperRemain; pNumTempUpperRemain = 0;
        delete []pNumTempLowerNew; pNumTempLowerNew = 0;
    }
    else
    {
        // global rank of the exchange partner in the lower half
        int targetCommId = globalProcID - localCommSize / 2;

        // receive the partner's "> median" part first (mirror of the
        // lower-half ordering — this avoids a send/send deadlock)
        MPI_Status status;
        long receiveCount = 0;

        MPI_Recv( &receiveCount, 1, MPI_LONG, targetCommId, 0, MPI_COMM_WORLD, &status );

        long* pNumTempUpperNew = new long[receiveCount];

        if ( 0 != receiveCount )
        {
            MPI_Recv( pNumTempUpperNew, receiveCount, MPI_LONG, targetCommId, 1, MPI_COMM_WORLD, &status );
        }

        // announce how many "<= median" elements will be sent
        long sendDataCount = ( ( partitionIndex < 0 ) ? ( 0 ) : ( partitionIndex + 1 ) );

        MPI_Send( &sendDataCount, 1, MPI_LONG, targetCommId, 0, MPI_COMM_WORLD );

        // split local data at partitionIndex: upper part stays, lower part goes
        unsigned long lowerCountRemain = partitionIndex + 1;
        unsigned long upperCountRemain = curNumN - partitionIndex - 1;

        long* pNumTempLowerRemain = new long[lowerCountRemain];
        long* pNumTempUpperRemain = new long[upperCountRemain];

        for ( long i = 0; i <= partitionIndex; i ++ )
            pNumTempLowerRemain[i] = pNum[i];

        for ( long i = partitionIndex + 1; i < curNumN; i ++ )
            pNumTempUpperRemain[i-partitionIndex-1] = pNum[i];

        if ( 0 != sendDataCount )
            MPI_Send( pNumTempLowerRemain, lowerCountRemain, MPI_LONG, targetCommId, 1, MPI_COMM_WORLD );

        // rebuild the local buffer: kept upper part + received upper part
        delete []pNum; pNum = 0;

        curNumN = upperCountRemain + receiveCount;
        pNum = new long[curNumN];

        for ( unsigned long i = 0; i < upperCountRemain; i ++ )
            pNum[i] = pNumTempUpperRemain[i];

        for ( long i = 0; i < receiveCount; i ++ )
            pNum[upperCountRemain+i] = pNumTempUpperNew[i];

        delete []pNumTempLowerRemain; pNumTempLowerRemain = 0;
        delete []pNumTempUpperRemain; pNumTempUpperRemain = 0;
        delete []pNumTempUpperNew; pNumTempUpperNew = 0;
    }

    return true;
}
//

int main( int argc, char* argv[] )
{
    if ( argc < 4 )
    {
        cout << "error : lack args." << endl;
        cout << "usage : " << "<program_name>" << " <test_data_path> <n> <output_file_path>." << endl;
        return 1;
    }

    string strTestDataPath = string( argv[1] );     // data src path
    unsigned long numN = atoi( argv[2] );      // numN
    string strOutPath = string( argv[3] );      // output path

//     //args for test
//     string strTestDataPath = "";
//     unsigned long numN = 100;
//     string strOutPath = "result.txt";
//     // ! args for test

    const int MAX_PROC_NAME_LEN = 256;      // max length of a processor name

    int procN;                           // number of process
    int curProcId;                          // id of current process
    char curProcName[MAX_PROC_NAME_LEN];      // name of current process
    int curProcNameLen;                     // the length of current process name

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &procN );
    MPI_Comm_rank( MPI_COMM_WORLD, &curProcId );
    MPI_Get_processor_name( curProcName, &curProcNameLen );

    // check if the amount of processes is the power of 2
    if ( ! isPowerBasedOn2( procN ) )
    {
        cout << "procN must be the power based on 2.." << endl;

        return 1;
    }
    // ! check if the amount of processes is the power of 2

    cout << "Process " << curProcId << " Running..." << endl << endl;

    double timeStart, timeEnd, timeCost;        // start time, end time, duration time

    long* pNum = 0;                        // data to be sorted
    long* pNumTemp = 0;                     // data handled by a single process
    int* pNumN = new int[procN];                // data count for each process
    int* pOffset = new int[procN];              // offset for each process

    if ( 0 == curProcId )
    {
        // read data
        pNum = new long[numN];
        //readData( strTestDataPath, pNum, numN );
        for ( unsigned long i = 0; i < numN; i ++ )
        {
            pNum[i] = numN - i;
        }
        // ! read data

        // timer started
        timeStart = MPI_Wtime();
        // ! timer started

        // calculate data-counts and offset-counts for every elem
        unsigned long numNPerProcBase = numN / procN;
        unsigned long numNRemain = numN % procN;

        for ( long i = 0; i < procN; i ++ )
        {
            pNumN[i] = numNPerProcBase;
            if ( numNRemain > 0 ) { pNumN[i] ++; numNRemain --; }
        }

        pOffset[0] = 0;
        for ( long i = 1; i < procN; i ++ )
            pOffset[i] = pOffset[i-1] + pNumN[i-1];

        // ! calculate data-counts and offset-counts for every elem
    }

    MPI_Bcast( pNumN, procN, MPI_INT, 0, MPI_COMM_WORLD );          // data count for each process
    MPI_Bcast( pOffset, procN, MPI_INT, 0, MPI_COMM_WORLD );            // data offset for each process

    int curNumN = pNumN[curProcId];                                 // data count of current process
    int curOffset = pOffset[curProcId];                             // data offset of current process

    pNumTemp = new long[curNumN];                                   // data handled by current process

    // scatter data to be sorted into different processes
    MPI_Scatterv( pNum, pNumN, pOffset, MPI_LONG, pNumTemp, curNumN, MPI_LONG, 0, MPI_COMM_WORLD );
    // ! scatter data to be sorted into different processes

    // two communicators for iteration
    MPI_Comm commOld;
    MPI_Comm commNew = MPI_COMM_WORLD;
    // ! two communicators for iteration

    unsigned int subCommN = 1;          // current amount of sub communicators
    while ( subCommN != procN )
    {
        int commIdNew;
        int commSizeNew;
        MPI_Comm_rank( commNew, &commIdNew );       // acquire proc-id in new comm
        MPI_Comm_size( commNew, &commSizeNew );     // acquire proc-count in new comm

        long curMedian = 0;
        if ( 0 == commIdNew )
        {
            // get median
            curMedian = getMedian( pNumTemp, 0, curNumN - 1 );
            // ! get median
        }

        // broadcast median to all processes in the same communicator
        MPI_Bcast( &curMedian, 1, MPI_LONG, 0, commNew );
        // ! broadcast median to all processes in the same communicator

        // partition data on each process
        long partitionIndex = -1;
        partitionGroup( pNumTemp, 0, curNumN - 1, curMedian, partitionIndex );
        // ! partition data on each process

        // exchange partitioned data
        exchangePartitionedData( commIdNew, commSizeNew, curProcId, partitionIndex, pNumTemp, curNumN );
        // ! exchange partitioned data

        commOld = commNew;              // iteration

        MPI_Barrier( MPI_COMM_WORLD );

        // split communicator
        MPI_Comm_split( commOld, curProcId / ( procN / ( subCommN * 2 ) ), 0, &commNew );
        // ! split communicator

        // update sub-communicator size
        subCommN *= 2;
        // ! update sub-communicator size
    }

    // sort local data
    insideSort( pNumTemp, 0, curNumN - 1 );
    // ! sort local data

    // get data amount & offset of data for each process
    MPI_Gather( &curNumN, 1, MPI_INT, pNumN, 1, MPI_INT, 0, MPI_COMM_WORLD );
    if ( 0 == curProcId )
    {
        pOffset[0] = 0;
        for ( long i = 1; i < procN; i ++ )
            pOffset[i] = pOffset[i-1] + pNumN[i-1];
    }
    // ! get data amount & offset of data for each process

    MPI_Bcast( pNumN, procN, MPI_INT, 0, MPI_COMM_WORLD );              // new data amount for each process
    MPI_Bcast( pOffset, procN, MPI_INT, 0, MPI_COMM_WORLD );            // new data offset for each process

    // gather all sorted data
    MPI_Gatherv( pNumTemp, curNumN, MPI_LONG, pNum, pNumN, pOffset, MPI_LONG, 0, MPI_COMM_WORLD );
    // ! gather all sorted data

    delete []pNumTemp; pNumTemp = 0;
    delete []pNumN; pNumN = 0;
    delete []pOffset; pOffset = 0;

    if ( 0 == curProcId )
    {
        // timer stopped
        timeEnd = MPI_Wtime();
        // ! timer stopped

        timeCost = timeEnd - timeStart;

//         for ( unsigned long i = 0; i < numN; i ++ )
//         {
//             cout << pNum[i];
//             if ( numN - 1 != i ) cout << ",";
//         }
//         cout << endl;
        delete []pNum; pNum = 0;

//        cout << endl << "cost time : " << timeCost << endl;

        // output result
        ofstream fout( strOutPath.c_str(), ios::app );
        fout << "cluster\t" << strTestDataPath << "\t" << "quick_sorting\t" << procN << "\t" << numN << "\t" << setprecision( 2 ) << timeCost << "\n";
        // ! output result

        cout << endl << "cluster " << strTestDataPath << " " << "quick_sorting " << procN << " " << numN << " " << setprecision( 2 ) << timeCost << endl;
    }

    // system( "pause" );

    MPI_Finalize();

    return 0;
}
