#include <sstream>
#include <iostream>
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#include <Timer.hpp>
#include <BaseDedup.hpp>
#include <PrettyPrints.hpp>
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
using namespace std;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Reset the per-pass progress accounting: arm the first reporting
// threshold and clear the percentage accumulated so far.
void BaseDedupThread::ResetProgress( void ) {
    nextProgress  = updateInterval;
    accumProgress = 0.0;
}
// A pass just finished: push one final progress tick up to the parent,
// then advance this thread's pass index.
void BaseDedupThread::FinalizeProgress( void ) {
    const uint32_t tick = uint32_t( updateInterval );
    parent->ProgressUpdate( tick, passIdx );
    ++passIdx;
}
// Report progress to the parent when the next threshold is crossed.
// At most one tick is emitted per call.
void BaseDedupThread::ProgressUpdate( const uint32_t currPassIdx ) {

    // position through this pass: whole chunks already finished
    // (100.0 each, via accumProgress) plus the fraction of the chunk
    // currently streaming
    const float position = accumProgress + imageStream.pcnt_through();

    // thresholds are scaled by the chunk count, since each chunk
    // contributes up to 100.0 to the accumulated position
    if( position > nextProgress * numChunks ) {
        parent->ProgressUpdate( uint32_t( updateInterval ), currPassIdx );
        nextProgress += updateInterval;
    }
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Borrow (not own) the per-thread file lists built by the master.
// The image and reference-count vectors must be parallel (same length);
// numChunks is derived from that common length.
void BaseDedupThread::SetFileVectorPtrs( const vector< string > * _img_files, const vector< string > * _ref_files ) {

    assert( _img_files->size() == _ref_files->size() );

    img_files = _img_files;
    ref_files = _ref_files;
    numChunks = _img_files->size();
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Open the image and refcount streams for one chunk and sanity-check
// that their sizes are mutually consistent before deduplication starts.
//
// @param mem_image_path  path to the raw memory-image chunk
// @param ref_count_path  path to the matching refcount file, or
//                        "/dev/null" when reference counts are absent
void BaseDedupThread::SanityOpenFiles( const char * mem_image_path, const char * ref_count_path ) {

    imageStream.open( mem_image_path, disklock, sync );
    refctStream.open( ref_count_path, disklock, sync );
    
    const uint64_t imgSize = imageStream.size();
    const uint64_t refSize = refctStream.size();
    
    if( string( ref_count_path ) == string( "/dev/null" ) ) {
        // pointed at /dev/null for reference counts: must read back empty
        assert( refSize == 0ULL );
    }
    else {
        // reference counts are optional; when present there is one
        // uint64_t refcount per page, so both size derivations must agree
        const uint64_t numPagesByImg = imgSize / pageSize;
        const uint64_t numPagesByRef = refSize / sizeof( uint64_t );
        WARN( refSize % sizeof( uint64_t ) == 0ULL, "refSize = " << refSize << ", ref_count_path = " << ref_count_path );
        WARN( numPagesByRef == numPagesByImg, "numPagesByRef = " << numPagesByRef << ", numPagesByImg = " << numPagesByImg );
    }
    
    // the image should be a whole number of pages, and the stream buffer
    // must divide evenly into both page-sized and refcount-sized reads
    WARN( imgSize % pageSize == 0ULL, "imgSize = " << imgSize << ", pageSize = " << pageSize << ", imgSize % pageSize = " << ( imgSize % pageSize ) << ", mem_image_path = " << mem_image_path );
    WARN( imageStream.BufferSize() % pageSize == 0ULL, "buffer size not evenly divisible by page size" );
    WARN( imageStream.BufferSize() % sizeof( uint64_t ) == 0ULL, "buffer size not evenly divisible by ref count read size" );
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Sum the on-disk sizes of all memory-image chunk files.
//
// @return total size in bytes across every file named in img_files.
uint64_t BaseDedupMaster::SumMemImageChunkSizes( void ) const {

    uint64_t bytes = 0ULL;

    // iterate the vector directly: the previous uint32_t loop counter
    // silently truncated img_files.size() (a size_t) before comparing
    for( vector< string >::const_iterator it = img_files.begin(); it != img_files.end(); ++it ) {
        bytes += StatFileSize( it->c_str() );
    }

    return bytes;
}
// Populate img_files / ref_files with the per-chunk file names.
//
// In chunked mode (impl != 0) each chunk gets a zero-padded numeric
// suffix (base_name.0000, base_name.0001, ...); otherwise the paths are
// used verbatim. When ref_count_path points at /dev/null, every ref
// entry is "/dev/null".
void BaseDedupMaster::BuildFileNameVectors( const char * mem_image_path, const char * ref_count_path ) {

    // accept either spelling as a request to discard reference counts
    const string refPath( ref_count_path );
    const bool devnull = ( refPath == "/dev/null" ) || ( refPath == "/dev/null/" );

    img_files.clear();
    ref_files.clear();

    if( impl ) {
        // basically using the existence (or not) of the reference counts
        // to indicate if LiME was used and the chunks are specified with
        // a suffix appended to the base file name
        for( uint32_t chunkIdx = 0; chunkIdx < numChunks; chunkIdx++ ) {

            stringstream ifn;
            ifn << mem_image_path << "." << ZERODEC( 4 ) << chunkIdx;
            img_files.push_back( ifn.str() );

            if( devnull ) {
                ref_files.push_back( "/dev/null" );
            }
            else {
                stringstream rfn;
                rfn << ref_count_path << "." << ZERODEC( 4 ) << chunkIdx;
                ref_files.push_back( rfn.str() );
            }
        }
    }
    else {
        // single monolithic image: one entry each, paths as given
        img_files.push_back( mem_image_path );
        ref_files.push_back( devnull ? "/dev/null" : ref_count_path );
    }
}
// Deal the chunk files out round-robin across the worker threads, tally
// the grand total byte count (image + refcount files), and hand each
// worker a pointer to its share of the file lists.
//
// @param ddthreads  the worker threads to receive file-list pointers
void BaseDedupMaster::DivideFilesByThreads( vector< BaseDedupThread * > & ddthreads ) {

    totalNumBytes = 0ULL;

    StatFile imgfile;
    StatFile reffile;

    const uint32_t numFiles = img_files.size();

    for( uint32_t fileIdx = 0; fileIdx < numFiles; fileIdx++ ) {
        // round-robin assignment: file i goes to thread i mod numThreads
        const uint32_t tidx = fileIdx % numThreads;

        thread_img_files[ tidx ].push_back( img_files[ fileIdx ] );
        thread_ref_files[ tidx ].push_back( ref_files[ fileIdx ] );

        // accumulate the total number of bytes across both file kinds
        imgfile.open( img_files[ fileIdx ].c_str() );
        reffile.open( ref_files[ fileIdx ].c_str() );
        totalNumBytes += imgfile.size();
        totalNumBytes += reffile.size();
        imgfile.close();
        reffile.close();
    }

    for( uint32_t tidx = 0; tidx < numThreads; tidx++ ) {
        ddthreads[ tidx ]->SetFileVectorPtrs( &thread_img_files[ tidx ], &thread_ref_files[ tidx ] );
    }
}
// Print a one-line progress report: pass number, percent complete,
// observed disk bandwidth, and estimated time remaining for this pass
// and for the whole run.
// NOTE(review): assumes timer.TotalTime() and the bytes-read estimate
// are non-zero — a first call with zero elapsed time would divide by
// zero; confirm callers only invoke this after real progress.
//
// @param currPassIdx  zero-based index of the pass in flight
void BaseDedupMaster::ProgressMessage( const uint32_t currPassIdx ) {
    // read the time elapsed:
    timer.Stop();
    double t = timer.TotalTime();
    
    // bytes read & remain (nextProgress is a percentage threshold):
    double read = nextProgress * double( totalNumBytes ) / 100.0;
    double togo = double( totalNumBytes ) - read;
    
    // bw & time estimate: rest of this pass, plus the full passes to go
    double diskBW = read / t;
    double tPass  = togo / diskBW;
    double tTotal = tPass + double( numPasses - currPassIdx - 1 ) * double( totalNumBytes ) / diskBW;
    
    // convert to MB/sec:
    diskBW /= ( 1024.0 * 1024.0 );
    
    // print progress & disk bw & time estimate:
    cout << "pass " << SETDEC( 2 ) << (currPassIdx+1) << " of " << SETDEC( 2 ) << numPasses << ", " << SETFIX( 3, 0 ) << (totalProgress / numThreads) << '%' << " complete, ";
    cout << "disk bw = " << SETFIX( 4, 0 ) << diskBW << " [MB/sec]" << ", " << PrettyTime( tPass ) << " remain this pass, " << PrettyTime( tTotal ) << " remain total" << endl;
    nextProgress += updateInterval;
    
    // restart the timer:
    timer.Start();
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Stream one memory-image chunk through the dedup pipeline, one line at
// a time, updating refcounts and progress as it goes.
//
// @param mem_image_path  image chunk to read
// @param ref_count_path  matching refcount file (or "/dev/null")
// @param currPassIdx     index of the current dedup pass
void BaseDedupThread::LoadMemImage( const char * mem_image_path, const char * ref_count_path, const uint64_t currPassIdx ) {

    lineIdx = 0;
    hugeIdx = 0;
    
    SanityOpenFiles( mem_image_path, ref_count_path );
    
    while( imageStream.read_line( line ) ) {
        
        UpdateRefCounts();
        
        // run dedup for different granularities:
        // ... 1. huge (2MB) pages
        // ... 2. page (4kB)
        // ... 3. line (64B)
        HugeDedup( currPassIdx );
        PageDedup( currPassIdx );
        LineDedup( currPassIdx );
        
        // show a progress meter:
        ProgressUpdate( currPassIdx );
    }
    
    // for some reason (?) the memory image does not always align on a 4kB or 2MB boundary
    // thus, pad out to the edge of the 2MB boundary with zero lines
    // NOTE(review): termination relies on the *Dedup calls advancing and
    // wrapping hugeIdx back to 0 at the 2MB boundary — confirm, else this
    // loop never exits.
    line->SetToZero();
    while( hugeIdx ) {
        HugeDedup( currPassIdx );
        PageDedup( currPassIdx );
        LineDedup( currPassIdx );
    }
        
    // cleanup:
    imageStream.close();
    refctStream.close();
}
// Run one full pass: reset progress and per-pass counters, stream every
// chunk assigned to this thread, then report the pass complete.
void BaseDedupThread::LoadMemImages( void ) {

    ResetProgress();

    // the non-unique counts accumulate on each pass through, so they
    // must be cleared before a new pass begins
    hugeCounts.ResetNonUnique();
    pageCounts.ResetNonUnique();
    lineCounts.ResetNonUnique();

    for( uint32_t idx = 0; idx < numChunks; idx++ ) {
        const string & imgPath = img_files->at( idx );
        const string & refPath = ref_files->at( idx );
        LoadMemImage( imgPath.c_str(), refPath.c_str(), passIdx );
        accumProgress += 100.0;   // one whole chunk done
    }

    FinalizeProgress();
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Print an 80-column horizontal rule, matching the banner width used
// throughout this file.
void BaseDedupMaster::PrintHSep( void ) const {
    cout << string( 80, '-' ) << endl;
}
void BaseDedupMaster::PrintRCHisto( void ) const {
    uint64_t vlinecount = 0ULL;
    for( map< uint64_t, uint64_t >::const_iterator it = rchisto.begin(); it != rchisto.end(); it++ ) {
        const uint64_t    rc = it->first;
        const uint64_t count = it->second;
        cout << "rc = " << rc << ", count = " << count << endl;
        vlinecount += rc * count;
    }
    vlinecount += lineCounts.glob.zero;
    WARN( vlinecount == lineCounts.glob.total, "vlinecount = " << vlinecount << ", lineCounts.glob.total = " << lineCounts.glob.total );
}
// Print one full dedup statistics report (2M/4k/line totals, zero and
// unique counts, and the derived dedup ratios) for one memory-region
// breakdown.
//
// @param lineCounts  64B line counters for this region
// @param pageCounts  4kB page counters for this region
// @param hugeCounts  2MB huge-page counters for this region
// @param title       heading printed at the top of the report
void BaseDedupMaster::PrintDedupStatsKernel( const DedupCounts & lineCounts, const DedupCounts & pageCounts, const DedupCounts & hugeCounts, const char * title ) const {
    
    // one translation line covers 16 data lines (ceiling division):
    uint64_t numTLines = 0ULL;
    numTLines = lineCounts.total / 16ULL;
    if( lineCounts.total % 16ULL ) {
        numTLines++;
    }
    // every line/page is either zero or counted in the refcount total:
    WARN( lineCounts.rc_tot + lineCounts.zero == lineCounts.total, "lineCounts.rc_tot = " << lineCounts.rc_tot << ", lineCounts.zero = " << lineCounts.zero << ", lineCounts.total = " << lineCounts.total );
    WARN( pageCounts.rc_tot + pageCounts.zero == pageCounts.total, "pageCounts.rc_tot = " << pageCounts.rc_tot << ", pageCounts.zero = " << pageCounts.zero << ", pageCounts.total = " << pageCounts.total );
    
    // storage cost model: a stored unique line carries 2 extra bytes
    // over a raw 64B line — presumably signature/refcount metadata
    // (see the "including signatures & refcounts" print below); confirm
    const double bytesPerULine = 66.0;
    const double bytesPerTLine = 64.0;
    const double bytesPerDLine = 64.0;
    
    const double nonddBytes = bytesPerDLine * double( lineCounts.total  );
    const double ulineBytes = bytesPerULine * double( lineCounts.unique );
    const double tlineBytes = bytesPerTLine * double( numTLines );
    const double dedupBytes = ulineBytes + tlineBytes;
    
    // raw total-to-unique ratio vs the ratio after metadata overhead:
    const double rawdedup = double( lineCounts.total ) / double( lineCounts.unique );
    const double actdedup = nonddBytes / dedupBytes;
    
    const double pagededup = double( pageCounts.total ) / double( pageCounts.unique );
    const double hugededup = double( hugeCounts.total ) / double( hugeCounts.unique );
    
    const uint64_t bytesPerHuge = 2ULL * 1024ULL * 1024ULL;
    const uint64_t bytesPerPage = 4096ULL;
    const uint64_t bytesPerLine =   64ULL;
    
    PrintHSep();
    cout << title << endl;
    cout << "4k pages, total:              " << PrettyInt( pageCounts.total , 16 ) << " | " << PrettyBytes( bytesPerPage * pageCounts.total,  16 ) << endl;
    cout << "4k pages, zero:               " << PrettyInt( pageCounts.zero  , 16 ) << " | " << PrettyBytes( bytesPerPage * pageCounts.zero,   16 ) << endl;
    cout << "4k pages, non-zero & unique:  " << PrettyInt( pageCounts.unique, 16 ) << " | " << PrettyBytes( bytesPerPage * pageCounts.unique, 16 ) << endl;
    cout << "2M pages, total:              " << PrettyInt( hugeCounts.total , 16 ) << " | " << PrettyBytes( bytesPerHuge * hugeCounts.total,  16 ) << endl;
    cout << "2M pages, zero:               " << PrettyInt( hugeCounts.zero  , 16 ) << " | " << PrettyBytes( bytesPerHuge * hugeCounts.zero,   16 ) << endl;
    cout << "2M pages, non-zero & unique:  " << PrettyInt( hugeCounts.unique, 16 ) << " | " << PrettyBytes( bytesPerHuge * hugeCounts.unique, 16 ) << endl;
    cout << endl;
    cout << "4k page dedup = " << SETFIX( 0, 2 ) << pagededup << "x" << endl;
    cout << "2M page dedup = " << SETFIX( 0, 2 ) << hugededup << "x" << endl;
    cout << endl;
    
    cout << endl;
    cout << "lines, total:              " << PrettyInt( lineCounts.total , 16 ) << " | " << PrettyBytes( bytesPerLine * lineCounts.total,  16 ) << endl;
    cout << "lines, zero:               " << PrettyInt( lineCounts.zero  , 16 ) << " | " << PrettyBytes( bytesPerLine * lineCounts.zero,   16 ) << endl;
    cout << "lines, non-zero & unique:  " << PrettyInt( lineCounts.unique, 16 ) << " | " << PrettyBytes( bytesPerLine * lineCounts.unique, 16 ) << endl;
    cout << "translation lines:         " << PrettyInt( numTLines        , 16 ) << " | " << PrettyBytes( bytesPerLine * numTLines,         16 ) << endl;
    cout << endl;

    cout << "translation line bytes:                              tlineBytes = " << PrettyBytes( tlineBytes, 16 ) << endl;
    cout << "data line bytes (including signatures & refcounts):  ulineBytes = " << PrettyBytes( ulineBytes, 16 ) << endl;
    cout << "deduplicate storage requirement in bytes:            dedupBytes = " << PrettyBytes( dedupBytes, 16 ) << endl;
    cout << endl;
    cout << "ttu ratio = " << SETFIX( 0, 2 ) << rawdedup << "x" << endl;
    cout << "est dedup = " << SETFIX( 0, 2 ) << actdedup << "x" << endl;    
    PrintHSep();
}
// Print the dedup statistics report for each memory-region breakdown:
// global (all of physical memory), kernel-allocated, and user-allocated.
void BaseDedupMaster::PrintDedupStats( void ) const {
    // fixed duplicated word in the report title ("memory memory")
    PrintDedupStatsKernel( lineCounts.glob, pageCounts.glob, hugeCounts.glob, "all of physical memory" );
    PrintDedupStatsKernel( lineCounts.kern, pageCounts.kern, hugeCounts.kern, "kernel allocated memory" );
    PrintDedupStatsKernel( lineCounts.user, pageCounts.user, hugeCounts.user, "user allocated memory"   );
}
// Print hash-table diagnostics — overflow counts and load factors — for
// the line (fg), 4k-page (pg), and 2M-page (hg) signature tables.
void BaseDedupMaster::PrintHashTableStats( void ) const {
    cout << "fg->NumOverflows() = " << fg->NumOverflows() << endl;
    cout << "pg->NumOverflows() = " << pg->NumOverflows() << endl;
    cout << "hg->NumOverflows() = " << hg->NumOverflows() << endl;
    cout << "fg->Load() = " << SETFIX( 0, 2 ) << fg->Load() << endl;
    cout << "pg->Load() = " << SETFIX( 0, 2 ) << pg->Load() << endl;
    cout << "hg->Load() = " << SETFIX( 0, 2 ) << hg->Load() << endl;
}
// Emit the full end-of-run report. The histogram and dedup summary are
// always printed; hash-table internals are diagnostic-only and gated
// behind the verbose flag.
void BaseDedupMaster::PrintStats( void ) const {

    PrintRCHisto();
    PrintDedupStats();

    if( !verbose ) {
        return;
    }
    PrintHashTableStats();
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Delegate refcount-histogram construction to the line-granularity hash
// table (fg), which fills `rchisto` with refcount -> count entries.
void BaseDedupMaster::BuildRCHisto( void ) {
    fg->BuildRCHisto( rchisto );
}
// Accumulate unique-entry counts from each granularity's hash table
// (fg = 64B line, pg = 4k page, hg = 2M huge page) into the master's
// counters, broken out by region: A* -> glob, K* -> kern, U* -> user.
void BaseDedupMaster::AccumUnique( void ) {
    lineCounts.glob.unique += fg->AUnique();
    lineCounts.kern.unique += fg->KUnique();
    lineCounts.user.unique += fg->UUnique();
    pageCounts.glob.unique += pg->AUnique();
    pageCounts.kern.unique += pg->KUnique();
    pageCounts.user.unique += pg->UUnique();
    hugeCounts.glob.unique += hg->AUnique();
    hugeCounts.kern.unique += hg->KUnique();
    hugeCounts.user.unique += hg->UUnique();
}
// Accumulate refcount totals (rc_tot) from each granularity's hash
// table into the master's counters; same fg/pg/hg and A/K/U layout as
// AccumUnique above.
void BaseDedupMaster::AccumTotals( void ) {
    lineCounts.glob.rc_tot += fg->ATotals();
    lineCounts.kern.rc_tot += fg->KTotals();
    lineCounts.user.rc_tot += fg->UTotals();
    pageCounts.glob.rc_tot += pg->ATotals();
    pageCounts.kern.rc_tot += pg->KTotals();
    pageCounts.user.rc_tot += pg->UTotals();
    hugeCounts.glob.rc_tot += hg->ATotals();
    hugeCounts.kern.rc_tot += hg->KTotals();
    hugeCounts.user.rc_tot += hg->UTotals();
}
void BaseDedupMaster::AccumCounts( void ) {

    lineCounts.ResetNonUnique();
    pageCounts.ResetNonUnique();
    hugeCounts.ResetNonUnique();
    
    fg->AccumCounts();
    pg->AccumCounts();
    hg->AccumCounts();
    AccumUnique();
    AccumTotals();
    
    for( uint32_t tidx = 0; tidx < numThreads; tidx++ ) {
        lineCounts.glob.total += bddthreads[ tidx ]->lineCounts.glob.total;
        lineCounts.kern.total += bddthreads[ tidx ]->lineCounts.kern.total;
        lineCounts.user.total += bddthreads[ tidx ]->lineCounts.user.total;
        lineCounts.glob.zero  += bddthreads[ tidx ]->lineCounts.glob.zero;
        lineCounts.kern.zero  += bddthreads[ tidx ]->lineCounts.kern.zero;
        lineCounts.user.zero  += bddthreads[ tidx ]->lineCounts.user.zero;
    
        pageCounts.glob.total += bddthreads[ tidx ]->pageCounts.glob.total;
        pageCounts.kern.total += bddthreads[ tidx ]->pageCounts.kern.total;
        pageCounts.user.total += bddthreads[ tidx ]->pageCounts.user.total;
        pageCounts.glob.zero  += bddthreads[ tidx ]->pageCounts.glob.zero;
        pageCounts.kern.zero  += bddthreads[ tidx ]->pageCounts.kern.zero;
        pageCounts.user.zero  += bddthreads[ tidx ]->pageCounts.user.zero;
    
        hugeCounts.glob.total += bddthreads[ tidx ]->hugeCounts.glob.total;
        hugeCounts.kern.total += bddthreads[ tidx ]->hugeCounts.kern.total;
        hugeCounts.user.total += bddthreads[ tidx ]->hugeCounts.user.total;
        hugeCounts.glob.zero  += bddthreads[ tidx ]->hugeCounts.glob.zero;
        hugeCounts.kern.zero  += bddthreads[ tidx ]->hugeCounts.kern.zero;
        hugeCounts.user.zero  += bddthreads[ tidx ]->hugeCounts.user.zero;
    }
}







