/*
 * Copyright (c) 2007, 2008 fr3@K <freak@fsfoundry.org>
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef MANKA__POOL_ARENA_HPP
#define MANKA__POOL_ARENA_HPP

#include <manka/nullptr.hpp>
#include <manka/null_mutex.hpp>
#include <manka/arena_allocator.hpp>
#include <manka/default_arena.hpp>

#include <cstddef>
#include <cassert>
#include <limits>

namespace manka
{

    /** A memory pool arena class template.
     *
     * @param ChunkBytes Size in bytes of memory chunks managed by this arena.
     * 
     * @par Concepts:
     * MemoryArena
     */
    template <
        std::size_t ChunkBytes,
        typename MutexType = null_mutex,
        typename LowerLayerArena = default_arena>
    class pool_arena
    {
    private:
        /** Disable copying.
         *
         */ 
        pool_arena(const pool_arena&);

        /** Disable assignment.
         *
         */ 
        pool_arena& operator=(const pool_arena&);

    public:
        typedef MutexType mutex_type;
        typedef typename MutexType::scoped_lock scoped_lock;
        typedef LowerLayerArena lower_layer_t;

        
    public:
        /** Construct a \c pool_arena.
         *
         * The arena starts with no pages; memory is obtained lazily from
         * the lower-layer arena on first allocation.
         */ 
        pool_arena()
            :   managed_pages_(nullptr),
                free_chunks_(nullptr),
                DEBUG_managed_page_count_(0),
                DEBUG_free_chunk_count_(0),
                DEBUG_outstanding_oversized_allocation_count_(0)
            {
            }

        /** Destruct a \c pool_arena.
         *
         * Returns every managed page to the lower-layer arena.  Asserts
         * (in debug builds) that no allocations are still outstanding.
         */ 
        ~pool_arena()
            {
                this->purge_();
            }

        /** Memory allocation.
         *
         * Requests of at most \c ChunkBytes are served from the pool;
         * larger requests are forwarded to the lower-layer arena.
         *
         * @param bytes Size in bytes to allocate.
         *
         * @throws std::bad_alloc Thrown on failure.
         */
        void* allocate(std::size_t bytes)
            {
                scoped_lock sl(mutex_);
                return this->allocate_(bytes);
            }
        
        /** Memory deallocation.
         *
         * This function is used to deallocate memory previously allocated
         * from this arena.
         *
         * @param ptr Pointer to memory previously allocated from this arena.
         * @param bytes Size in bytes of memory pointed by \c ptr.  Must be
         *     the same value that was passed to \c allocate(), since it
         *     selects between the pooled and the oversized path.
         */
        void deallocate(void* ptr, std::size_t bytes)
            {
                scoped_lock sl(mutex_);
                this->deallocate_(ptr, bytes);
            }

        /** Capability query.
         *
         * This function is used to query the capability of this arena, i.e.
         * the largest allocation size in bytes that can be potentially
         * satisfied.  Oversized requests are forwarded to the lower layer,
         * so no fixed upper bound applies.
         */
        std::size_t max_bytes() const
            {
                return std::numeric_limits<std::size_t>::max();
            }

        /** Get an allocator instance.
         *
         * This function returns an allocator instance that is bound to this
         * memory arena.
         */
        arena_allocator<pool_arena, void>
        get_allocator()
            {
                return arena_allocator<pool_arena, void>(*this);
            }

    private:
#if !defined(MANKA_DOC)
        static const std::size_t chunk_bytes = ChunkBytes;

        /** A pool chunk: either user payload or a free-list link. */
        struct chunk_t
        {
            union
            {
                char padding[chunk_bytes];
                chunk_t* next;
            };
        };

        static const std::size_t real_chunk_bytes = sizeof(chunk_t);
        static const std::size_t page_bytes = 1024 * 4;
        // As many chunks as fit in a page alongside the page link,
        // but always at least one chunk per page.
        static const std::size_t chunks_per_page =
            ((sizeof(chunk_t) < (page_bytes - sizeof(void*))) ?
             (page_bytes - sizeof(void*)) / sizeof(chunk_t) :
             1);
        
        /** A page: an array of chunks plus a link to the next page. */
        struct page_t
        {
            chunk_t chunks[chunks_per_page];
            page_t* next;
        };
        
    private:
        lower_layer_t lower_layer_;
        page_t* managed_pages_;   // singly linked list of all pages owned
        chunk_t* free_chunks_;    // singly linked list of free chunks
        mutex_type mutex_;

    public:
        std::size_t DEBUG_managed_page_count_;
        std::size_t DEBUG_free_chunk_count_;
        std::size_t DEBUG_outstanding_oversized_allocation_count_;

    private:
        /** Obtain a fresh page from the lower layer and thread its chunks
         * into a null-terminated free list.
         *
         * @throws std::bad_alloc Propagated from the lower layer.
         */
        static page_t* allocate_page_(lower_layer_t& lower_layer_)
            {
                page_t* page =
                    reinterpret_cast<page_t*>(
                        lower_layer_.allocate(sizeof(page_t)));

                // Link each chunk to its successor, stopping at the last
                // chunk, then terminate the list.  The terminator must be
                // written unconditionally: with chunks_per_page == 1 the
                // original conditional left the sole chunk's `next` as a
                // past-the-end pointer, corrupting the free list.
                chunk_t* chunk = page->chunks;
                chunk_t* last = &(page->chunks[chunks_per_page - 1]);
                while(chunk != last)
                {
                    chunk_t* next = chunk + 1;
                    chunk->next = next;
                    chunk = next;
                }
                last->next = nullptr;
                page->next = nullptr;
                return page;
            }

        /** Number of pooled chunks currently handed out to callers. */
        std::size_t outstanding_managed_allocations_() const
            {
                return (DEBUG_managed_page_count_ * chunks_per_page) -
                    DEBUG_free_chunk_count_;
            }
        /** Number of oversized allocations not yet deallocated. */
        std::size_t outstanding_unmanaged_allocations_() const
            {
                return DEBUG_outstanding_oversized_allocation_count_;
            }

        /** Allocation worker; caller must hold \c mutex_. */
        void* allocate_(std::size_t bytes)
            {
                // Oversized requests bypass the pool entirely.
                if(bytes > chunk_bytes)
                {
                    ++DEBUG_outstanding_oversized_allocation_count_;
                    return lower_layer_.allocate(bytes);
                }
                
                // Replenish the free list with a fresh page if exhausted.
                if(free_chunks_ == nullptr)
                {
                    page_t* new_page =
                        this->allocate_page_(lower_layer_);

                    new_page->next = managed_pages_;
                    managed_pages_ = new_page;

                    free_chunks_ = new_page->chunks;
                    ++DEBUG_managed_page_count_;
                    DEBUG_free_chunk_count_ += chunks_per_page;
                }

                // Pop the head of the free list.
                chunk_t* chunk = free_chunks_;
                free_chunks_ = free_chunks_->next;
                --DEBUG_free_chunk_count_;

                return chunk;
            }
        /** Deallocation worker; caller must hold \c mutex_. */
        void deallocate_(void* ptr, std::size_t bytes)
            {
                // Oversized allocations were never pooled; hand them back
                // to the lower layer directly.
                if(bytes > chunk_bytes)
                {
                    --DEBUG_outstanding_oversized_allocation_count_;

                    lower_layer_.deallocate(ptr, bytes);
                    return;
                }

                // Push the chunk back onto the head of the free list.
                chunk_t* chunk = reinterpret_cast<chunk_t*>(ptr);
                chunk->next = free_chunks_;
                free_chunks_ = chunk;
                ++DEBUG_free_chunk_count_;
            }
        /** Release all pages to the lower layer and reset bookkeeping.
         *
         * Asserts that no pooled or oversized allocations are outstanding.
         */
        void purge_()
            {
                assert(outstanding_managed_allocations_() == 0);
                assert(outstanding_unmanaged_allocations_() == 0);

                page_t* page = managed_pages_;

                while(page != nullptr)
                {
                    page_t* next = page->next;
                    lower_layer_.deallocate(page, sizeof(page_t));
                    page = next;
                }
                managed_pages_ = nullptr;
                free_chunks_ = nullptr;
                DEBUG_managed_page_count_ = 0;
                DEBUG_free_chunk_count_ = 0;
                DEBUG_outstanding_oversized_allocation_count_ = 0;
            }
#endif // !MANKA_DOC
    };

} // namespace manka

#endif // !MANKA__POOL_ARENA_HPP
