// Allocator for small, fixed-size data blocks
// $Id: alloc_lite.cpp 568 2010-10-25 15:41:07Z Oleg.Bulychov $
#include "stdafx.h"

#include "src/heo/include/mem_util.h"
#include "src/heo/include/lf_endl.h"
#include "src/heo/include/alloc_page.h"
#include "src/heo/include/fast_vector.h"
#include "src/heo/include/alloc_lite.h"

/** Chunk allocator, based on the SmallObjAllocator from
A. Alexandrescu, "Modern C++ Design",
but modified to use the page allocator.
*/
//__________________________________________________________________________________
//__________________________________________________________________________________
//HEO_SMART_ALLOCATOR: use malloc for large allocs
#ifndef HEO_SMART_ALLOCATOR
#define HEO_SMART_ALLOCATOR
#endif
//__________________________________________________________________________________
namespace mem_util
{
//__________________________________________________________________________________
//:One page-sized chunk: a header (free-list head + free count) followed by
//:raw storage carved into equal blocks. Free blocks form a singly linked
//:list whose "next" links are block INDICES stored in the first size_t of
//:each free block, so no extra memory is needed for bookkeeping.
class chunk_page
{
public:
    enum { CHAR_AVAIL = MEM_PAGE_SIZE - 2 * sizeof(size_t) };//:payload bytes after the header
    typedef chunk_page TChunk;
    size_t first_free_block_;//:index of the head of the free list
    size_t free_blocks_;     //:number of blocks currently free in this chunk
    char data_[CHAR_AVAIL];  //:block storage (pads the struct to MEM_PAGE_SIZE)
//.............................................................................
    //:Formats the page: every block is free and its embedded link points to
    //:the next block (the last one points one past the end, never followed
    //:because free_blocks_ hits zero first).
    explicit chunk_page(size_t block_size, size_t max_blocks) NO_THROW_():
        first_free_block_(),
        free_blocks_()
    {
        COMPILETIME_CHECK(sizeof(*this) == MEM_PAGE_SIZE, bad_size);
        ASSERT_(block_size <= CHAR_AVAIL);
        ASSERT_(max_blocks == CHAR_AVAIL / block_size);
        free_blocks_ = max_blocks;
        char* p = (char*)data_;
        //:NOTE(review): writing a size_t link assumes block_size >= sizeof(size_t);
        //:confirm this holds for the smallest size class (ALIGN) on 64-bit builds.
        for(size_t i = 0; i < max_blocks; p += block_size)
            *(size_t*)p = ++i;//:assume p is aligned (see ALIGN below)!
    }
//.............................................................................
    //:Pops the head of the free list; caller must ensure free_blocks_ > 0.
    void* Alloc(size_t block_size) NO_THROW_()
    {
        ASSERT_(free_blocks_);
        --free_blocks_;
        size_t* p = (size_t*)(data_ + first_free_block_ * block_size);
        first_free_block_ = *p;//:the block's first word holds the next free index
        return p;
    }
//.............................................................................
    //:Pushes ptr (which must belong to this chunk) back onto the free list.
    void Free(void* ptr, size_t block_size) NO_THROW_()
    {
        ASSERT_((char*)ptr >= data_);
        size_t block_to_free = size_t(((char*)ptr - data_) / block_size);
        size_t* p = (size_t*)ptr;
        *p = first_free_block_;//:link the freed block to the old head
        first_free_block_ = block_to_free;
        ++free_blocks_;
    }
//.............................................................................
    //:True if ptr falls inside this chunk's used payload (chunk_cb bytes).
    bool has_Ptr(void* ptr, size_t chunk_cb) const NO_THROW_()
    {
        char* p = (char*)ptr;
        return (data_ <= p) && (p < data_ + chunk_cb);
    }
//.............................................................................
};
//__________________________________________________________________________________
template<class AllocPolicy>
class chunk_allocator
{
    DISALLOW_COPY_AND_ASSIGN(chunk_allocator);
public:
    typedef chunk_page TChunk;
    typedef TChunk* PChunk;
//.............................................................................
    size_t block_size_;
    size_t blocks_per_chunk_;
    fast_vector<PChunk, AllocPolicy> chunk_;
#if (MEM_STAT > 1)
    int64_t   total_allocs_;
#endif
#if (MEM_STAT > 0)
    size_t    cur_allocs_;
    size_t    peak_allocs_;
#endif
    PChunk last_alloc_chunk_;
    PChunk empty_chunk_;
    void*  last_free_ptr_;
    int     rank_;//:TODO review
//.............................................................................
    explicit chunk_allocator() NO_THROW_():
        block_size_(),
        blocks_per_chunk_(),
#if (MEM_STAT > 1)
        total_allocs_(),
#endif
#if (MEM_STAT > 0)
        cur_allocs_(),
        peak_allocs_(),
#endif
        last_alloc_chunk_(),
        empty_chunk_(),
        last_free_ptr_(),
        rank_()
    {}
//.............................................................................
    ~chunk_allocator() NO_THROW_()
    {
#if (MEM_STAT > 0)
        ASSERT_(!cur_allocs_ || last_free_ptr_);
#endif
        for(size_t i = 0; i < chunk_.size(); ++i)
            page_allocator::Free(rank_, chunk_[i]);
    }
//.............................................................................
    void construct(size_t block_size) NO_THROW_()
    {
        ASSERT_(!block_size_);
        ASSERT_(block_size && block_size < TChunk::CHAR_AVAIL);
        ASSERT_(!last_free_ptr_);
        block_size_ = block_size;
        blocks_per_chunk_ = TChunk::CHAR_AVAIL / block_size;
        ASSERT_(blocks_per_chunk_ > 0);
    }
//.............................................................................
    void* Alloc() NO_THROW_()
    {
#if (MEM_STAT > 1)
        ++total_allocs_;
#endif
        ASSERT_(!empty_chunk_ || empty_chunk_->free_blocks_ == blocks_per_chunk_);
        if (last_free_ptr_)
        {
            void* p = last_free_ptr_;
            last_free_ptr_ = NULL;
            return p;
        }

        if (!last_alloc_chunk_ || !last_alloc_chunk_->free_blocks_)
        {
            if (empty_chunk_)
            {
                last_alloc_chunk_ = empty_chunk_;
                empty_chunk_ = NULL;
            }
            else
            {
                for(fast_vector<PChunk>::iterator it = chunk_.begin(); ; ++it)
                {
                    if (chunk_.end() == it)
                    {
                        if (!make_New_Chunk())
                            return NULL;
                        break;
                    }
                    if ((*it)->free_blocks_)
                    {
                        last_alloc_chunk_ = *it;
                        break;
                    }
                }
            }
        }
        if (last_alloc_chunk_ == empty_chunk_)
            empty_chunk_ = NULL;
        ASSERT_(last_alloc_chunk_->free_blocks_);
        ASSERT_(!empty_chunk_ || empty_chunk_->free_blocks_ == blocks_per_chunk_);
#if (MEM_STAT > 0)
        if (++cur_allocs_ > peak_allocs_)
            peak_allocs_ = cur_allocs_;
#endif
        return last_alloc_chunk_->Alloc(block_size_);
    }
//.............................................................................
    void Free(void* ptr) NO_THROW_()
    {
        if (!ptr)
            return;
        ASSERT_(own_Ptr(ptr));
        std::swap(last_free_ptr_, ptr);
        Free_Impl(ptr);
    }
//.............................................................................
    void Free_Impl(void* ptr) NO_THROW_()
    {
        if (!ptr)
            return;
#if (MEM_STAT > 0)
        --cur_allocs_;
        ASSERT_(cur_allocs_ >= 0);
#endif
        PChunk last_free_chunk = find_Chunk_By_Ptr(ptr);
        last_free_chunk->Free(ptr, block_size_);
        if (last_free_chunk->free_blocks_ == blocks_per_chunk_)
        {
            ASSERT_(empty_chunk_ != last_free_chunk);
            empty_chunk_ = last_free_chunk;
        }
    }
//.............................................................................
    bool own_Ptr(void* ptr) const
    {
        PChunk p = (PChunk)((size_t(ptr) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE);
        return own_Chunk(p);
    }
//.............................................................................
    bool own_Chunk(PChunk p) const
    {
        for (size_t i = 0; i < chunk_.size(); ++i)
        {
            if (chunk_[i] == p)
                return true;
        }
        return false;
    }
//.............................................................................
#if !defined(DEBUG) && !defined(_DEBUG)
    static 
#endif
    PChunk find_Chunk_By_Ptr(void* ptr) NO_THROW_()
    {
        ASSERT_(ptr);
        PChunk p = (PChunk)((size_t(ptr) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE);
        ASSERT_((void*)p != ptr);
        ASSERT_(own_Chunk(p));
        ASSERT_(p->has_Ptr(ptr, blocks_per_chunk_ * block_size_));
        return p;
    }
//.............................................................................
    bool make_New_Chunk() NO_THROW_()
    {
        void* p = page_allocator::Alloc(rank_);
        if (!p)
            return false;
        PChunk pc = new(p) TChunk(block_size_, blocks_per_chunk_);
        PChunk* pp = chunk_.push_back(mem_util::no_check_null_t());
        if (!pp)
            return false;
        *pp = pc;
        last_alloc_chunk_ = pc;
        return true;
    }
//.............................................................................
    void report(std::ostream& os, size_t index, int64_t& total_peak_mem) const
    {
#if (MEM_STAT > 1)
        if (total_allocs_)
            os  << "lite allocs<" << block_size_ << ">: " << total_allocs_ << lf;
#endif
#if (MEM_STAT > 0)
        if (peak_allocs_)
        {
            int64_t peak_mem = int64_t(peak_allocs_) * block_size_;
            total_peak_mem += peak_mem;
            os  << "peak<" << block_size_ << ">: " << peak_allocs_ << lf
                << "peak mem: " << peak_mem
                << " (" << peak_mem / (1024 * 1024) << "Mb)" << lf;
        }
#endif
    }
//.............................................................................
//.............................................................................
    void check_Leaks(size_t index) const
    {
        size_t chunk_cb = blocks_per_chunk_ * block_size_;
        for (size_t i = 0; i < chunk_.size(); ++i)
        {
            if (chunk_[i]->free_blocks_ != blocks_per_chunk_)
            {
                size_t leaks = blocks_per_chunk_ - chunk_[i]->free_blocks_;
                if (leaks && last_free_ptr_ && chunk_[i]->has_Ptr(last_free_ptr_, chunk_cb))
                    --leaks;
                if (!leaks)
                    continue;
                std::cout << "***LEAKS in thread " << rank_
                    << ", block_size " << block_size_ << ", chunk["
                    << index << ", " << i << "]: "
                    << leaks << lf;
            }
        }
    }
};
//__________________________________________________________________________________
//:Front end of the lite allocator: a table of chunk_allocators, one per
//:size class. Slot i serves requests of (i + 1) * align bytes; a request
//:is routed to the first class large enough to hold it.
template<class AllocPolicy,
         size_t MAX_OBJECT_SIZE = 256u,
         size_t ALIGN = 4u>
class lite_allocator_base
{
    DISALLOW_COPY_AND_ASSIGN(lite_allocator_base);
public:
    enum
    {
        max_object_size = MAX_OBJECT_SIZE,
        align = ALIGN,
    };
    fast_vector< chunk_allocator< AllocPolicy >, AllocPolicy > chunk_allocator_;
    int rank_;
//.............................................................................
    //:Builds the size-class table for thread nRank.
    explicit lite_allocator_base(int nRank) NO_THROW_():
        rank_(nRank)
    {
        chunk_allocator_.resize(Get_Offset(max_object_size) + 1);
        size_t const slot_count = chunk_allocator_.size();
        for(size_t slot = 0; slot < slot_count; ++slot)
        {
            chunk_allocator_[slot].construct((slot + 1) * align);
            chunk_allocator_[slot].rank_ = nRank;
        }
    }
//.............................................................................
    //:Hands out cbSize bytes from the matching size class (NULL on exhaustion).
    void* Alloc(size_t cbSize) NO_THROW_()
    {
        ASSERT_(cbSize <= max_object_size);
        size_t const cb = cbSize ? cbSize : 1;//:zero-byte requests still get a real block
        return chunk_allocator_[Get_Offset(cb)].Alloc();
    }
//.............................................................................
    //:Returns a block to its size class; cbSize must match the Alloc() call.
    void Free(void* ptr, size_t cbSize) NO_THROW_()
    {
        ASSERT_(cbSize <= max_object_size);
        size_t const cb = cbSize ? cbSize : 1;
        chunk_allocator_[Get_Offset(cb)].Free(ptr);
    }
//.............................................................................
    //:Maps an object size to its size-class slot (size rounded up to ALIGN).
    static size_t Get_Offset(size_t obj_size) NO_THROW_()
    {
        return (obj_size + ALIGN - 1) / ALIGN - 1;
    }
//.............................................................................
    //:Prints per-class statistics followed by the summed peak memory.
    void report(std::ostream& os) const
    {
        int64_t peak_mem = 0;
        size_t const count = chunk_allocator_.size();
        for(size_t slot = 0; slot < count; ++slot)
            chunk_allocator_[slot].report(os, slot, peak_mem);
        os  << "total peak mem: " << peak_mem
            << " (" << peak_mem / (1024 * 1024) << "Mb)" << lf;
    }
//.............................................................................
    //:Asks every size class to report still-allocated blocks.
    void check_Leaks() const
    {
        size_t const count = chunk_allocator_.size();
        for(size_t slot = 0; slot < count; ++slot)
            chunk_allocator_[slot].check_Leaks(slot);
    }
};
//__________________________________________________________________________________
//:Concrete lite allocator: the generic base bound to the malloc policy.
class lite_allocator_impl: public lite_allocator_base< mem_util::malloc_policy<> >
{
    typedef lite_allocator_base< mem_util::malloc_policy<> > TBase;
public:
    lite_allocator_impl(int nRank): TBase(nRank)
    {}
};
//__________________________________________________________________________________
}//:mem_util
//__________________________________________________________________________________
//:Per-rank allocator array, malloc'ed and placement-constructed in construct().
static mem_util::lite_allocator_impl* g_impl = NULL;
//:Number of entries in g_impl (== nMaxThreads passed to construct()).
static int g_count = 0;
//:The calling thread's allocator; one copy per OpenMP thread (see pragma below).
static mem_util::lite_allocator_impl* t_impl = NULL;
#pragma omp threadprivate(t_impl)
//.............................................................................
//:Builds the page allocator plus one lite allocator per thread rank and
//:binds the calling (master) thread to rank 0. Returns false on failure.
bool lite_allocator::construct(int nMaxThreads, size_t mb) NO_THROW_()
{
    using namespace mem_util;
    if (!page_allocator::construct(nMaxThreads, mb))
        return false;
    size_t const cb = sizeof(lite_allocator_impl) * nMaxThreads;
    g_impl = (lite_allocator_impl*)malloc(cb);
    if (!g_impl)
        return false;
    g_count = nMaxThreads;
    for (int rank = 0; rank < nMaxThreads; ++rank)
        new(&g_impl[rank]) lite_allocator_impl(rank);//:placement-construct in the raw buffer
    t_impl = &g_impl[0];
    return true;
}
//.............................................................................
//:Leak-checks the master thread, destroys every per-rank allocator and
//:releases the backing array. Mirrors construct().
void lite_allocator::destruct() NO_THROW_()
{
    using namespace mem_util;
    t_impl->check_Leaks();
    t_impl = NULL;
    for (int rank = g_count; rank-- > 0; )
        g_impl[rank].~lite_allocator_impl();//:explicit dtor, memory came from malloc
    free(g_impl);
    g_impl = NULL;
    g_count = 0;
}
//.............................................................................
//:Allocation entry point that never returns silently on failure: a NULL
//:from the underlying allocator triggers Mem_Panic.
void* lite_allocator::Alloc(size_t cbSize) NO_THROW_()
{
    void* p = Alloc(cbSize, mem_util::no_check_null_t());
    if (p)
        return p;
    Mem_Panic(cbSize, NULL);
    VERIFY_(p);
    return p;
}
//.............................................................................
//:Allocation without the panic path: may return NULL. Oversized requests
//:bypass the chunk allocator and go straight to malloc (HEO_SMART_ALLOCATOR).
void* lite_allocator::Alloc(size_t cbSize, mem_util::no_check_null_t const&) NO_THROW_()
{
#if defined(HEO_SMART_ALLOCATOR)
    if (cbSize > mem_util::lite_allocator_impl::max_object_size)
        return malloc(cbSize);
#endif
    return t_impl->Alloc(cbSize);
}
//.............................................................................
//.............................................................................
//:Releases a block; cbSize must match the size passed to Alloc(), since it
//:decides whether the block came from malloc or from the chunk allocator.
void lite_allocator::Free(void* ptr, size_t cbSize) NO_THROW_()
{
#if defined(HEO_SMART_ALLOCATOR)
    bool const oversized = cbSize > mem_util::lite_allocator_impl::max_object_size;
    if (oversized)
    {
        free(ptr);//:was served directly by malloc in Alloc
        return;
    }
#endif
    t_impl->Free(ptr, cbSize);
}
//.............................................................................
//:Binds the calling thread to its per-rank allocator. Rank 0 (master)
//:keeps the binding it received in construct().
void lite_allocator::enter_Thread(int nRank) NO_THROW_()
{
    if (nRank != 0)
        t_impl = &g_impl[nRank];
}
//.............................................................................
//:Unbinds a worker thread from its allocator after a leak check.
void lite_allocator::leave_Thread(int nRank) NO_THROW_()
{
    if (nRank == 0)
        return;//:only the master thread can have persistent allocs!
    t_impl->check_Leaks();
    t_impl = NULL;
}
//.............................................................................
//.............................................................................
//:Largest request size the chunk allocator serves itself; anything bigger
//:is handled by malloc (when HEO_SMART_ALLOCATOR is enabled).
size_t lite_allocator::get_Max_Item_Size() NO_THROW_()
{
    return size_t(mem_util::lite_allocator_impl::max_object_size);
}
//.............................................................................
//.............................................................................
void lite_allocator::report(std::ostream& os)
{
    t_impl->report(os);
}
//__________________________________________________________________________________

//EOF!
