//Allocator with memory defragmentation for large data blocks 
// $Id: alloc_big.cpp 654 2011-02-21 17:07:19Z Oleg.Bulychov $
#include "stdafx.h"

#include "src/heo/include/lf_endl.h"
#include "src/heo/include/sys_alloc.h"
#include "src/heo/include/alloc_lite.h"
#include "src/heo/include/alloc_big.h"

//__________________________________________________________________________________
//__________________________________________________________________________________
namespace mem_util
{
//__________________________________________________________________________________
class big_allocator_impl
{
    DISALLOW_COPY_AND_ASSIGN(big_allocator_impl);
//.............................................................................
//:size of defragmented area - minimum contiguous hole we try to keep
//:after the cursor so that small allocations don't defragment every time
    enum { MIN_SOLID_SPACE = 16 * MEM_PAGE_SIZE };
//.............................................................................
    typedef mem_util::TFloHeader TFloHeader;
//.............................................................................
//:header chain over one contiguous page-backed arena (set in construct()):
    TFloHeader* first_header_;//:sentinel at the start of the buffer
    TFloHeader* current_header_;//:cursor - the most recently carved block
    TFloHeader* last_header_;//:zero-sized sentinel at the end of the buffer

    size_t reserve_in_header_;//:pending tail slack (from 'reserved') applied on the next carve

    size_t total_avail_size_;//:all free bytes in the arena
    size_t left_avail_size_;//"trash size" before current header
    size_t right_avail_size_;//"trash size" after current header (include it!)

    size_t flo_pages_max_;//:arena size in MEM_PAGE_SIZE pages

#if (MEM_STAT > 1)
//statistics, useful or may be not...
    int64_t        allocs_;
    int64_t        alloc_sum_;
    int64_t        reallocs_;
    int64_t        movreallocs_count_;
    int64_t        defragments_;
    int64_t        defragment_moves_;
    int64_t        realloc_moves_;
#endif
#if (MEM_STAT > 0)
    size_t         cur_allocs_;//:live allocation count
    size_t         peak_allocs_;
    size_t         cur_usage_;//:bytes currently in use (headers included)
    size_t         peak_usage_;
#endif

    sys_page_allocator sys_alloc_;//:owns the page-backed arena buffer
    int rank_;//:owning thread rank (used in leak reports)
public:
    //:zero-initialize every member (in declaration order); the arena is not
    //:usable until construct() succeeds.
    //:fix: the original left first_header_/current_header_/last_header_
    //:indeterminate, so check_Leaks()/destruction before construct() read
    //:garbage pointers - they are value-initialized here.
    explicit big_allocator_impl(int nRank) NO_THROW_():
        //current_static_page_(),
        //static_page_count_(),
        //static_page_max_(),

        first_header_(),
        current_header_(),
        last_header_(),

        reserve_in_header_(),

        total_avail_size_(),
        left_avail_size_(),
        right_avail_size_(),

        flo_pages_max_(),

#if (MEM_STAT > 1)
        allocs_(),
        alloc_sum_(),
        reallocs_(),
        movreallocs_count_(),
        defragments_(),
        defragment_moves_(),
        realloc_moves_(),
#endif
#if (MEM_STAT > 0)
        cur_allocs_(),
        peak_allocs_(),
        cur_usage_(),
        peak_usage_(),
#endif

        sys_alloc_(),
        rank_(nRank)
    {
        //:headers carved back-to-back must stay granule-aligned
        COMPILETIME_CHECK(ROUND2(sizeof(TFloHeader), MEM_GRANULARITY) ==
            sizeof(TFloHeader), please_Align_TFloHeader);
    }
//.............................................................................
#if (MEM_STAT > 0)
    ~big_allocator_impl() NO_THROW_()
    {
        //:at teardown every allocation must already have been freed
        ASSERT_(!cur_usage_);
        ASSERT_(!cur_allocs_);
    }
#endif
//.............................................................................
    //:one-shot arena setup: reserves 'mb' megabytes of pages and carves them
    //:into [head sentinel | one giant free block | tail sentinel].
    //:returns 1 on success, 0 if the page reservation fails.
    int construct(size_t mb) NO_THROW_()
    {
        ASSERT_(!flo_pages_max_);

        size_t bytes = mb * 1024 * 1024;
        flo_pages_max_ = bytes / MEM_PAGE_SIZE;
        if (bytes % MEM_PAGE_SIZE)
            ++flo_pages_max_;

        if (!sys_alloc_.Alloc(flo_pages_max_))
            return 0;

        size_t max_block_size = flo_pages_max_ * MEM_PAGE_SIZE -
            sizeof(TFloHeader);
        char* base = (char*)sys_alloc_.get_Buffer();

        //:head sentinel owns the whole arena as a single free block
        first_header_ = new(base) TFloHeader(max_block_size, 0);
        first_header_->prev_offset_ = 0;
        current_header_ = first_header_;

        //:tail sentinel: zero-sized block closing the chain
        last_header_ = new(base + max_block_size) TFloHeader(0, 0);
        last_header_->set_Prev(first_header_);

        //:cursor sits at the start, so all free space is "to the right"
        right_avail_size_ = first_header_->avail_Size();
        total_avail_size_ = right_avail_size_;
        return 1;
    }
//.............................................................................
//.............................................................................
    //:allocate 'size' bytes; returns a STATIC pointer-to-pointer: *result is
    //:the data address and is kept up to date when the block is moved by
    //:defragmentation.  'reserved' hints at future growth room.
    void** Alloc(size_t size, size_t reserved = 0, bool bZeroInit = false) NO_THROW_()
    {
#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif//MEM_DEBUG_MODE

        //:header + data, rounded up to the allocation granule
        size_t new_used_size = ROUND2(size + sizeof(TFloHeader), MEM_GRANULARITY);

        if (new_used_size > total_avail_size_)
        {
            //:arena exhausted - no amount of defragmentation can help
            Mem_Panic(new_used_size, &total_avail_size_);
            VERIFY_(new_used_size <= total_avail_size_);
        }
#if (MEM_STAT > 1)
        alloc_sum_ += size;
        ++allocs_;
#endif
#if (MEM_STAT > 0)
        cur_usage_ += new_used_size;
        if (cur_usage_ > peak_usage_)
            peak_usage_ = cur_usage_;
        if (++cur_allocs_ > peak_allocs_)
            peak_allocs_ = cur_allocs_;
#endif

        //:link the carved block and its static slot both ways
        void** back_ptr = make_Static_Pointer();
        TFloHeader* new_header = make_Flo_Header(size, new_used_size, reserved);
        *back_ptr = new_header + 1;
        new_header->back_ptr_ = back_ptr;

        if (bZeroInit)
            memset((char*)*back_ptr, 0, size);

#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif
        return back_ptr;
    }
//.............................................................................
    inline void** make_Static_Pointer()
    {
        void* ptr = lite_allocator::Alloc<void*>();
        return (void**)ptr;
    }
//.............................................................................
    //:carve a block of new_used_size bytes (header + granule-rounded data)
    //:out of the free space after current_header_, defragmenting on demand.
    //:'reserved' requests extra growth room behind the new block.
    //:returns a header whose back_ptr_ is NOT yet set - caller must fill it.
    TFloHeader* make_Flo_Header(size_t size, size_t new_used_size,
        size_t reserved) NO_THROW_()
    {
        ASSERT_(total_avail_size_ - left_avail_size_ == right_avail_size_);
        ASSERT_(new_used_size <= total_avail_size_);

        //:aim for a comfortably large contiguous hole, not just a snug fit
        size_t solid_size = std::max(size_t(MIN_SOLID_SPACE), new_used_size);

        if (right_avail_size_ < new_used_size || //:panic - no room at right!
            (right_avail_size_ < solid_size &&   //:or right is too small
            left_avail_size_ >= MIN_SOLID_SPACE))//:but left is big
        {
            left_avail_size_ = 0;
            right_avail_size_ = total_avail_size_;
            current_header_ = first_header_;//:then jump to left/first
        }

        size_t avail_size = current_header_->avail_Size();

        TFloHeader* next = current_header_->next_Ptr();

        //:never ask defragment for more than actually exists to the right
        solid_size = std::min(solid_size, right_avail_size_);

        if (solid_size > avail_size)
            defragment(next, avail_size, solid_size);//:key feature!!!

//:dec. free mem:
        total_avail_size_ -= new_used_size;
        right_avail_size_ -= new_used_size;

        size_t current_used_size = current_header_->used_Size();
        if (reserve_in_header_ && avail_size >= reserve_in_header_ + new_used_size)
        {
            //:honor the reservation requested on the previous carve: leave
            //:that much slack in current's tail (accounted as left-side free)
            current_header_->block_size_ = reserve_in_header_ + current_used_size;
            left_avail_size_ += reserve_in_header_;
            right_avail_size_ -= reserve_in_header_;
            avail_size -= reserve_in_header_;
        }
        else
        {
            current_header_->block_size_ = current_used_size;//:trim
        }

//:construct semi-valid header (w/o back_ptr!):
        TFloHeader* new_header = new(current_header_->next_Ptr())
            TFloHeader(avail_size, size);

        new_header->set_Prev(current_header_);

        current_header_ = new_header;
        //:remember the caller's growth request; it is applied to this block
        //:on the NEXT carve (see the reservation branch above)
        if (reserved > size)
            reserve_in_header_ = ROUND2(reserved - size, MEM_GRANULARITY);
        else
            reserve_in_header_ = 0;

        next->set_Prev(new_header);
        return new_header;
    }
//.............................................................................
//:defragment the blocks, until solid_size bytes appears after current
    //:slide used blocks toward current_header_ until at least solid_size
    //:contiguous free bytes sit directly after current_header_.
    //:in/out: 'next' (the block after current) and 'avail_size' (free bytes
    //:after current) are updated to the post-compaction state.
    void defragment(TFloHeader*& next, size_t& avail_size,
        size_t solid_size) NO_THROW_()
    {
#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif

#if (MEM_STAT > 1)
        ++defragments_;
#endif
        //:a pending tail reservation cannot survive compaction
        reserve_in_header_ = 0;

        while(!avail_size)
        {//:unable to move (yet)
            current_header_ = next;
            next = next->next_Ptr();//:so skip this block
            avail_size = current_header_->avail_Size();
        }

        size_t move_bytes = 0;

        for(; solid_size > avail_size;)
        {
//trim:
            current_header_->block_size_ = current_header_->used_Size();
//:calc new addr:
            TFloHeader* head = current_header_->next_Ptr();
//:repair prev link:
            next->prev_offset_ = (char*)head - (char*)current_header_;

//:now we have "trash" area after current, before next.

//:collect all non-fragmented blocks after current into the chain:
            TFloHeader* chain = next;
            size_t chain_used_size = next->used_Size();

            for(;;)
            {
//:repair back_ptr: each chain block is about to move left by avail_size bytes
                *(next->back_ptr_) = (char*)(next + 1) - avail_size;

                if (0 != next->avail_Size())
                    break;//end of chain

                next = next->next_Ptr();
                chain_used_size += next->used_Size();
            }

            TFloHeader* next_next = next->next_Ptr();

            //:the chain's last block inherits the hole being closed up
            next->block_size_ += avail_size;

//:now, move the chain into this "trash" area:

            memmove(head, chain, chain_used_size);
            move_bytes += chain_used_size;

//:repair: current becomes the (moved) last block of the chain
            current_header_ = (TFloHeader*)((char*)next - avail_size);

            next = next_next;

            avail_size = current_header_->avail_Size();
        }
        next->set_Prev(current_header_);

#if (MEM_STAT > 1)
        defragment_moves_ += move_bytes;
#endif

#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif
    }
//.............................................................................
    //:return a back-pointer slot to the lite allocator; the slot must still
    //:reference a live data block at this point
    void free_Static_Pointer(void** slot) NO_THROW_()
    {
        ASSERT_(*slot);
        lite_allocator::Free(slot);
    }
//.............................................................................
    //:return the block's bytes to the free pool by merging them into the
    //:previous block's tail slack; the header chain stays linked throughout.
    void free_Flo_Header(TFloHeader* header) NO_THROW_()
    {
        TFloHeader* prev = header->prev_Ptr();

        size_t trash = header->used_Size();

//:inc. free mem:
        total_avail_size_ += trash;

        if (header < current_header_)
        {
            left_avail_size_ += trash;
        }
        else
        if (header == current_header_)
        {
            //:freeing the cursor block itself: retreat the cursor to prev;
            //:prev's slack migrates from the left pool to the right pool
            size_t prev_avail_Size = prev->avail_Size();
            left_avail_size_ -= prev_avail_Size;
            right_avail_size_ += prev_avail_Size + trash;
//:then repair current header:
            current_header_ = prev;
            if (first_header_ == prev)
                reserve_in_header_ = 0;
            else
                reserve_in_header_ = prev_avail_Size;//:re-arm prev's tail slack
        }
        else
        {//>
            right_avail_size_ += trash;
        }


//:merge this block with previous:
        prev->block_size_ += header->block_size_;

        TFloHeader* next = header->next_Ptr();

//:repairs next link
        next->set_Prev(prev);

#if (MEM_DEBUG_MODE > 0)
        memset(header, 0xfe, trash);//:poison on free!
#endif
    }
//.............................................................................
    //:resize the data block at 'ptr' (the DATA address; header sits just
    //:before it).  Shrinks/grows in place when possible; otherwise carves a
    //:new block, copies the payload and updates the static back-pointer so
    //:client handles stay valid.
    void Realloc(void* ptr, size_t size, size_t reserved = 0,
        bool bZeroInit = false) NO_THROW_()
    {
#if (MEM_STAT > 1)
        ++reallocs_;
#endif
        TFloHeader* re_header = &((TFloHeader*)(ptr))[-1];

#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif

#if (MEM_DEBUG_MODE > 0)
        current_header_->check_Magic();
        re_header->check_Magic();
#endif

        size_t old_data_size = re_header->data_size_;
        if (size == old_data_size)
        {
            return;//:nothing to do...
        }
        else
        if (size < old_data_size)
        {
            flo_Realloc_Down(re_header, size);
#if (MEM_DEBUG_MODE > 1)
            flo_Self_Test();
#endif
            return;//:realloc in place - trim
        }
        else
        {
//size > old_data_size
            size_t new_used_size = ROUND2(size + sizeof(TFloHeader), MEM_GRANULARITY);
            size_t add_used_size = new_used_size - re_header->used_Size();
#if (MEM_STAT > 0)
            cur_usage_ += add_used_size;
            if (peak_usage_ < cur_usage_)
                peak_usage_ = cur_usage_;
#endif
            if (new_used_size <= re_header->block_size_)
            {//:realloc in place - grow
                flo_Realloc_Up_In_Place(re_header, size, add_used_size, bZeroInit);
            }
            else
            {//:need to move :(
                if (new_used_size > total_avail_size_)//:sic - add_used_size is not enough
                {
                    Mem_Panic(new_used_size, &total_avail_size_);
                    VERIFY_(new_used_size <= total_avail_size_);
                }

                void** back_ptr = re_header->back_ptr_;//:so save static pointer

//:part of alloc:
                TFloHeader* header = make_Flo_Header(size, new_used_size, reserved);

//:move old_header to header:
//:(re-read via back_ptr - make_Flo_Header may have defragmented the arena
//:and relocated the old block)
                TFloHeader* old_header = &((TFloHeader*)*back_ptr)[-1];

                ptr = (void*)(header + 1);

                memcpy(ptr, (void*)(old_header + 1), old_data_size);
#if (MEM_STAT > 1)
                realloc_moves_ += old_data_size;
                ++movreallocs_count_;
#endif

//:part of free:
                free_Flo_Header(old_header);

//:repair:
                header->back_ptr_ = back_ptr;

                *header->back_ptr_ = ptr;
                if (bZeroInit)
                    memset((char*)ptr + old_data_size, 0, size - old_data_size);
            }
        }

#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif
    }
//.............................................................................
    //:shrink in place: record the new payload size, then credit any whole
    //:granules released by the trim to the free-space counters
    void flo_Realloc_Down(TFloHeader* re_header, size_t size) NO_THROW_()
    {
        size_t before = re_header->used_Size();
        re_header->data_size_ = size;
        size_t freed = before - re_header->used_Size();

        if (!freed)
            return;//:shrank within the same granule - nothing released

        total_avail_size_ += freed;
        if (re_header < current_header_)
            left_avail_size_ += freed;
        else
            right_avail_size_ += freed;
#if (MEM_STAT > 0)
        ASSERT_(cur_usage_ >= freed);
        cur_usage_ -= freed;
#endif
    }
//.............................................................................
    //:grow inside the block's existing slack: debit the free counters on
    //:whichever side of the cursor the block lives, then extend the payload
    void flo_Realloc_Up_In_Place(TFloHeader* re_header, size_t size,
        size_t add_used_size, bool bZeroInit) NO_THROW_()
    {
        total_avail_size_ -= add_used_size;
        if (re_header < current_header_)
            left_avail_size_ -= add_used_size;
        else
            right_avail_size_ -= add_used_size;

        size_t grown_from = re_header->data_size_;
        re_header->data_size_ = size;

        if (bZeroInit)
        {
            //:clear only the newly exposed tail bytes
            char* tail = (char*)(re_header + 1) + grown_from;
            memset(tail, 0, size - grown_from);
        }
    }
//.............................................................................
//.............................................................................
    //:release the data block at 'ptr': returns the static slot to the lite
    //:allocator and merges the block's bytes back into the free pool
    void Free(void* ptr) NO_THROW_()
    {
        ASSERT_(ptr);
#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif

        TFloHeader* header = &((TFloHeader*)(ptr))[-1];

#if (MEM_DEBUG_MODE > 0)
        current_header_->check_Magic();
        header->check_Magic();
#endif
#if (MEM_STAT > 0)
        ASSERT_(cur_usage_ >= header->used_Size());
        cur_usage_ -= header->used_Size();
        ASSERT_(cur_allocs_ > 0);
        --cur_allocs_;
#endif

        free_Static_Pointer(header->back_ptr_);

        free_Flo_Header(header);

#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif
    }
//.............................................................................
//.............................................................................
//.............................................................................
    //:total free bytes in the arena (both sides of the cursor)
    size_t flo_Avail() const NO_THROW_()
    {
#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif
        return total_avail_size_;
    }
//.............................................................................
//.............................................................................
//.............................................................................
//:forces full defragmentation and trim
    void squeeze() NO_THROW_()
    {
#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif

        if (total_avail_size_ > current_header_->avail_Size())
        {
            left_avail_size_ = 0;
            right_avail_size_ = total_avail_size_;
            current_header_ = first_header_;

            size_t avail_size = first_header_->avail_Size();

            TFloHeader* next = first_header_->next_Ptr();

            defragment(next, avail_size, total_avail_size_);
        }

#if (MEM_DEBUG_MODE > 1)
        flo_Self_Test();
#endif
    }
//.............................................................................
//.............................................................................
#if (MEM_DEBUG_MODE > 1)
    //:full-chain invariant check: walks every header from first to last,
    //:verifying magic values, granule alignment, forward/backward link
    //:symmetry, back_ptr consistency, and that the cached size counters
    //:tally with what the chain actually contains
    void flo_Self_Test(/*bool bDetailed = false*/) const NO_THROW_()
    {//:debug stuff
        current_header_->check_Magic();
        ASSERT_(0 == first_header_->prev_offset_);
        ASSERT_(0 == first_header_->data_size_);
        TFloHeader* header = first_header_;
        size_t total_block_size = 0;
        size_t total_avail_size = 0;
        size_t total_used_size = 0;
        //:first pass - headers strictly before the cursor
        for(; header < current_header_; )
        {
            header->check_Magic();
            ASSERT_(ROUND2(size_t(header), MEM_GRANULARITY) == size_t(header));
            ASSERT_(0 != header->block_size_);
            ASSERT_(header == first_header_ ||
                *header->back_ptr_ == (void*)(header + 1));
            total_block_size += header->block_size_;
            total_avail_size += header->avail_Size();
            total_used_size += header->used_Size();
            //if (bDetailed)
            //    dump_Header(header);
            TFloHeader* temp = header->next_Ptr();
            ASSERT_(temp->prev_Ptr() == header);
            header = temp;
        }

        //:slack before the cursor must equal the cached left counter
        ASSERT_(total_avail_size == left_avail_size_);

        //:second pass - cursor up to (not including) the tail sentinel
        for(; header < last_header_; )
        {
            header->check_Magic();
            ASSERT_(ROUND2(size_t(header), MEM_GRANULARITY) == size_t(header));
            ASSERT_(0 != header->block_size_);
            ASSERT_(header == first_header_ ||
                *header->back_ptr_ == (void*)(header + 1));
            total_block_size += header->block_size_;
            total_avail_size += header->avail_Size();
            total_used_size += header->used_Size();
            //if (bDetailed)
            //    header->dump();
            TFloHeader* temp = header->next_Ptr();
            ASSERT_(temp->prev_Ptr() == header);
            header = temp;
        }
        last_header_->check_Magic();
        ASSERT_(0 == last_header_->block_size_);
        ASSERT_(0 == last_header_->data_size_);
        ASSERT_(0 != last_header_->prev_offset_);

        ASSERT_(total_avail_size == total_avail_size_);
#if defined(DEBUG) || defined(_DEBUG)
        size_t max_block_size = flo_pages_max_ * MEM_PAGE_SIZE -
            sizeof(TFloHeader);
        ASSERT_(total_block_size == max_block_size);
#endif
        ASSERT_(total_used_size == total_block_size - total_avail_size);
        ASSERT_(right_avail_size_ == total_avail_size_ - left_avail_size_);
    }
#endif
//.............................................................................
//.............................................................................
    //:walks the whole header chain (validating the links on the way) and
    //:prints the compiled-in statistics to 'os'.
    //:NOTE(review): total_used_size/total_used_count are accumulated by both
    //:walks but never printed - the walks serve as an integrity check.
    void report(std::ostream& os) const
    {
        TFloHeader* header = first_header_;
        size_t total_used_size = 0;
        size_t total_used_count = 0;
        for(; header < current_header_; )
        {
#if (MEM_DEBUG_MODE > 1)
            header->check_Magic();
#endif
            ASSERT_(ROUND2(size_t(header), MEM_GRANULARITY) == size_t(header));
            ASSERT_(0 != header->block_size_);
            ASSERT_(header == first_header_ ||
                *header->back_ptr_ == (void*)(header + 1));
            if (header != first_header_)
            {
                ++total_used_count;
                total_used_size += header->used_Size();
            }
            TFloHeader* temp = header->next_Ptr();
            ASSERT_(temp->prev_Ptr() == header);
            header = temp;
        }
        for(; header < last_header_; )
        {
#if (MEM_DEBUG_MODE > 1)
            header->check_Magic();
#endif
            ASSERT_(ROUND2(size_t(header), MEM_GRANULARITY) == size_t(header));
            ASSERT_(0 != header->block_size_);
            ASSERT_(header == first_header_ ||
                *header->back_ptr_ == (void*)(header + 1));
            if (header != first_header_)
            {
                ++total_used_count;
                total_used_size += header->used_Size();
            }
            TFloHeader* temp = header->next_Ptr();
            ASSERT_(temp->prev_Ptr() == header);
            header = temp;
        }

#if (MEM_STAT > 1)
        os  << "big allocs   : " << allocs_ << lf
            << "mem of allocs: " << alloc_sum_
            << " (" << alloc_sum_ / (1024 * 1024) << "Mb)" << lf

            << "average alloc: " << (!allocs_ ? 0 :
                        alloc_sum_ / allocs_) << " bytes" << lf

            << "reallocs     : " << reallocs_ << lf

            << "movreallocs  : " << movreallocs_count_ << lf

            << "r. mem moving: " << realloc_moves_
            << " (" << realloc_moves_ / (1024 * 1024) << "Mb)" << lf

            << "defrags      : " << defragments_ << lf

            << "d. mem moving: " << defragment_moves_
            << " (" << defragment_moves_ / (1024 * 1024) << "Mb)"
            << lf;
#endif
#if (MEM_STAT > 0)
        os  << "big peak     : " << peak_allocs_ << lf
            << "peak mem     : " << peak_usage_
            << " (" << peak_usage_ / (1024 * 1024) << "Mb)" << lf;
#endif
    }
//.............................................................................
    void check_Leaks()
    {
        size_t total_used_count = 0;
        TFloHeader* header = first_header_;
        for(; header < last_header_; )
        {
            if (header != first_header_)
                ++total_used_count;
            header = header->next_Ptr();
        }
        if (0 != total_used_count)
        {
            std::cout << "***LEAKS in thread " << rank_ << ": " << total_used_count << lf;
        }
    }
//.............................................................................
    //:stream insertion delegates to report()
    friend std::ostream& operator <<(std::ostream& os, big_allocator_impl const& a)
    {
        a.report(os);
        return os;
    }
};
//__________________________________________________________________________________
}//:mem_util
//__________________________________________________________________________________
//__________________________________________________________________________________
//__________________________________________________________________________________
//:one allocator instance per OpenMP thread: g_impl is the shared array
//:(placement-constructed in construct()), t_impl is each thread's private
//:pointer into it, bound by rank in enter_Thread()
static mem_util::big_allocator_impl* g_impl = NULL;
static int g_count = 0;
static mem_util::big_allocator_impl* t_impl = NULL;
#pragma omp threadprivate(t_impl)
//.............................................................................
//:create one big_allocator_impl per thread, each owning its own 'mb'-Mb
//:arena.  Returns false on any failure.
//:fix: on a mid-loop arena failure the original returned with g_count set
//:to nMaxThreads while only i+1 objects were constructed, so a subsequent
//:destruct() ran destructors on raw memory and the malloc leaked - now the
//:failure path unwinds everything constructed so far and resets the globals.
bool big_allocator::construct(int nMaxThreads, size_t mb) NO_THROW_()
{
    using namespace mem_util;
    g_impl = (big_allocator_impl*)malloc(sizeof(big_allocator_impl) * nMaxThreads);
    if (!g_impl)
        return false;
    g_count = nMaxThreads;
    for (int i = 0; i < nMaxThreads; ++i)
    {
        new(&g_impl[i]) big_allocator_impl(i);
        if (!g_impl[i].construct(mb))
        {
            //:unwind: destroy everything constructed so far (including the
            //:instance whose construct() just failed) and clear the globals
            for (int k = i; k >= 0; --k)
                g_impl[k].~big_allocator_impl();
            free(g_impl);
            g_impl = NULL;
            g_count = 0;
            return false;
        }
    }
    t_impl = &g_impl[0];
    return true;
}
//.............................................................................
//:tear down all per-thread allocators; reports leaks still held by the
//:calling thread's instance first
void big_allocator::destruct() NO_THROW_()
{
    using namespace mem_util;
    t_impl->check_Leaks();
    t_impl = NULL;
    //:destroy in reverse construction order, then release the raw array
    for (int i = g_count; i-- > 0;)
        g_impl[i].~big_allocator_impl();
    free(g_impl);
    g_impl = NULL;
    g_count = 0;
}
//.............................................................................
//:delegates to the calling thread's private allocator instance
void** big_allocator::Alloc(size_t size, size_t reserved/* = 0*/,
                            bool bZeroInit/* = false*/) NO_THROW_()
{
    return t_impl->Alloc(size, reserved, bZeroInit);
}
//.............................................................................
//:delegates to the calling thread's private allocator instance
void big_allocator::Realloc(void* ptr, size_t size, size_t reserved/* = 0*/,
                            bool bZeroInit/* = false*/) NO_THROW_()
{
    t_impl->Realloc(ptr, size, reserved, bZeroInit);
}
//.............................................................................
//:delegates to the calling thread's private allocator instance
void big_allocator::Free(void* ptr) NO_THROW_()
{
    t_impl->Free(ptr);
}
//.............................................................................
//:free bytes remaining in the calling thread's arena
size_t big_allocator::Get_Avail() NO_THROW_()
{
    return t_impl->flo_Avail();
}
//.............................................................................
//:bind the calling thread's private t_impl to its rank's allocator.
//:rank 0 (master) keeps the binding made in construct().
void big_allocator::enter_Thread(int nRank) NO_THROW_()
{
    //std::cout << "*enter_Thread(" << nRank << ")" << lf;
    if (nRank == 0)
        return;
    t_impl = g_impl + nRank;
}
//.............................................................................
//:unbind the calling thread's allocator, reporting anything still allocated.
//:rank 0 is exempt - only the master thread can hold persistent allocs.
void big_allocator::leave_Thread(int nRank) NO_THROW_()
{
    //std::cout << "*leave_Thread(" << nRank << ")" << lf;
    if (nRank == 0)
        return;
    //:only master thread can have persistent allocs!
    t_impl->check_Leaks();
    t_impl = NULL;
}
//.............................................................................
//:forces a full defragmentation of the calling thread's arena
void big_allocator::squeeze() NO_THROW_()
{
    t_impl->squeeze();
}
//.............................................................................
//:prints the calling thread's allocator statistics to 'os'
void big_allocator::report(std::ostream& os)
{
    t_impl->report(os);
}
//.............................................................................

//EOF!
