#ifndef _BUOLA_MEMORY_CRBTREEMEMORYALGORITHM_H_
#define _BUOLA_MEMORY_CRBTREEMEMORYALGORITHM_H_

#include <buola/buola.h>
#include <buola/container/cintrusiverbtree.h>

namespace buola {

//best-fit memory allocation algorithm over a single contiguous segment
//free blocks are kept in an intrusive red-black tree ordered by size, so Allocate
//can locate the smallest sufficient block with a logarithmic search; adjacent free
//blocks are coalesced on Deallocate via the boundary-tag style mPrevSize links
//tMutex guards every mutating operation (pass a no-op mutex for single-threaded use)
//vAlignment must be a power of two; all returned pointers and block sizes honor it
template<typename tMutex,typename tVoidPointer,typename tSize=std::size_t,tSize vAlignment=2*sizeof(tSize)>
class CRBTreeMemoryAlgorithm
{
    static_assert((vAlignment&(vAlignment-1))==0,"alignment must be a power of two");

    struct SBlock;
    typedef ORebindPointer<tVoidPointer,SBlock> tBlockPointer;
    typedef CIntrusiveRBTree<SBlock,false,std::less<SBlock>,tBlockPointer> tTree;
    
    //we put this into a base class because we want it before SHook, so that we can actually give SHook to the user
    struct SSizeInfo
    {
        tSize mSize;     //total size of this block, header included
        tSize mPrevSize; //to mark a block as allocated, we put the prev size of the next block to 0
                         //the first block contains the size of the last block
    };
    
    //a free block: size info followed by the tree hook
    //allocated blocks only use the SSizeInfo part, the rest belongs to the user
    struct SBlock : public SSizeInfo, public tTree::SHook
    {
        SBlock()
        {}

        //free blocks are ordered by size so that lower_bound yields the best fit
        bool operator<(const SBlock &pRH) const
        {
            return this->mSize<pRH.mSize;
        }
    };

    // tMutex is inherited to use the empty-base optimization if it is empty
    struct SHeader : public tMutex
    {
        std::ptrdiff_t mOffset; //offset from this where allocation can start
        tSize mAllocatedSize;   //bytes currently handed out (block headers included)
        tSize mUsableSize;      //total bytes available for allocation after alignment
        
        tTree mTree;            //free blocks, ordered by size
    };
    
public:
    //pSegmentSize: total size of the segment starting at pSegment
    //pHeaderSize: bytes at the start of the segment reserved before allocable space
    //NOTE(review): offsets are stored relative to this, so this object is assumed to
    //live at a fixed position relative to the segment — confirm against the caller
    CRBTreeMemoryAlgorithm(tSize pSegmentSize,void *pSegment,tSize pHeaderSize)
    {
        uint8_t *lThis=reinterpret_cast<uint8_t*>(this);
        uint8_t *lSegment=reinterpret_cast<uint8_t*>(pSegment);
    
        uint8_t *lAllocStart=get_next_aligned_pos(lSegment+pHeaderSize);
    
        mSegmentSize=pSegmentSize; //was previously left uninitialized
        mHeader.mUsableSize=get_prev_aligned_size(pSegmentSize-(lAllocStart-lSegment));
        mHeader.mOffset=lAllocStart-lThis;
        mHeader.mAllocatedSize=0;

        //add the first segment (just everything)
        AddSegment(lAllocStart,mHeader.mUsableSize);
    }
    
    //observers read the header without taking the lock; values may be stale
    //under concurrent use, but each read is a single word
    bool empty() const        {   return mHeader.mAllocatedSize==0;   }
    tSize UsableSize() const  {   return mHeader.mUsableSize;   }
    tSize FreeMemory() const  {   return mHeader.mUsableSize-mHeader.mAllocatedSize;  }

    //returns an aligned pointer to at least pSize usable bytes, or nullptr if no free block fits
    void *Allocate(tSize pSize,std::nothrow_t)
    {
        pSize=get_next_aligned_size(pSize,vAlignment);
        //never allocate a block smaller than SBlock, because it can't be returned
        tSize lTotalSize=get_next_aligned_size(std::max(sizeof(SBlock),pSize+sizeof(SSizeInfo)),vAlignment);
    
        lock_guard<tMutex> lLock(mHeader);

        //best fit: smallest free block whose size is >= lTotalSize
        SBlock lDummyBlock;
        lDummyBlock.mSize=lTotalSize;
        auto lI=mHeader.mTree.lower_bound(lDummyBlock);
        if(lI!=mHeader.mTree.end())
        {
            SBlock *lOldBlock=&*lI;
            SBlock *lNewBlock;
            
            if(lOldBlock->mSize>=lTotalSize+sizeof(SBlock)) //there would be size for another allocation, so split it
            {
                //we leave the empty space in the beginning, so we create a new node to contain the allocated space
                //this way the node in the tree can stay there (of course after moving it within the structure)
                lOldBlock->mSize-=lTotalSize; //it will be the remaining size
                
                lNewBlock=new(reinterpret_cast<uint8_t*>(lOldBlock)+lOldBlock->mSize) SBlock;                
                lNewBlock->mSize=lTotalSize;
                lNewBlock->mPrevSize=lOldBlock->mSize; //the shrunken remainder stays free

                //shrinking lOldBlock can only violate ordering with respect to its predecessor
                if(lI!=mHeader.mTree.begin()&&(--typename tTree::iterator(lI))->mSize>=lOldBlock->mSize) //we changed the order
                {
                    //erase the block and add it again
                    mHeader.mTree.erase(lI);
                    mHeader.mTree.InsertEqual(*lOldBlock);
                }
            }
            else //it just barely fits
            {
                lNewBlock=lOldBlock;
                mHeader.mTree.erase(lI);
            }
            
            mHeader.mAllocatedSize+=lNewBlock->mSize;
            FixNext(lNewBlock,true); //mark lNewBlock as allocated

            return BlockToUser(lNewBlock);
        }
        return nullptr;
    }

    //throwing variant: same as above but throws std::bad_alloc on exhaustion
    void *Allocate(tSize pSize)
    {
        if(void *p=Allocate(pSize,std::nothrow))
            return p;
        
        throw std::bad_alloc();
    }
    
    //returns the block holding pUser to the free tree, coalescing with free neighbors
    //pUser may be nullptr, in which case nothing happens
    void Deallocate(void *pUser)
    {
        if(!pUser) return;
        SBlock *lBlock=UserToBlock(pUser);
        
        lock_guard<tMutex> lLock(mHeader);

        //account under the lock: the previous unlocked update raced with
        //concurrent Allocate/Deallocate calls that modify mAllocatedSize
        mHeader.mAllocatedSize-=lBlock->mSize;

        if(SBlock *lNext=NextIfFree(lBlock)) //merge it
        {
            lBlock->mSize+=lNext->mSize;
            mHeader.mTree.erase(mHeader.mTree.iterator_to(*lNext));
        }
        
        if(SBlock *lPrev=PrevIfFree(lBlock))
        {
            mHeader.mTree.erase(mHeader.mTree.iterator_to(*lPrev));
            lPrev->mSize+=lBlock->mSize;
            lBlock=lPrev;
        }

        mHeader.mTree.InsertEqual(*lBlock);
        FixNext(lBlock,false); //mark the (possibly merged) block as free
    }

private:
    //aligned size of the per-block header; the user pointer starts right after it
    tSize SizeInfoSize()        {   return get_next_aligned_size(sizeof(SSizeInfo),vAlignment); }

    uint8_t *AllocableStart()   {   return reinterpret_cast<uint8_t*>(this)+mHeader.mOffset;   }
    uint8_t *AllocableEnd()     {   return AllocableStart()+mHeader.mUsableSize;  }

    SBlock *FirstBlock()        {   return reinterpret_cast<SBlock*>(AllocableStart());         }
    
    //maps a pointer previously returned by Allocate back to its block header
    SBlock *UserToBlock(void *pUser)
    {
        return reinterpret_cast<SBlock*>(reinterpret_cast<uint8_t*>(pUser)-SizeInfoSize());
    }

    //maps a block header to the pointer handed to the user
    void *BlockToUser(SBlock *pBlock)
    {
        return reinterpret_cast<uint8_t*>(pBlock)+SizeInfoSize();
    }    
    
    //returns the first block for the last block (the block chain wraps around)
    SBlock *NextBlock(SBlock *pBlock)
    {   
        uint8_t *lEnd=reinterpret_cast<uint8_t*>(pBlock)+pBlock->mSize;
        if(lEnd==AllocableEnd()) return FirstBlock();
        return reinterpret_cast<SBlock*>(lEnd);
    }

    //returns the physically preceding block if it is free, nullptr otherwise
    //(a zero mPrevSize means the previous block is allocated; the first block has no predecessor)
    SBlock *PrevIfFree(SBlock *pBlock)
    {
        if(!pBlock->mPrevSize) return nullptr;
        uint8_t *lBeg=reinterpret_cast<uint8_t*>(pBlock);
        if(lBeg==AllocableStart()) return nullptr;
        return reinterpret_cast<SBlock*>(lBeg-pBlock->mPrevSize);
    }
    
    //returns the physically following block if it is free, nullptr otherwise
    //(a block's free/allocated state is recorded in the NEXT block's mPrevSize)
    SBlock *NextIfFree(SBlock *pBlock)
    {
        uint8_t *lEnd=reinterpret_cast<uint8_t*>(pBlock)+pBlock->mSize;
        if(lEnd==AllocableEnd()) return nullptr;
        SBlock *lBlock=reinterpret_cast<SBlock*>(lEnd);
        if(NextBlock(lBlock)->mPrevSize) return lBlock;
        return nullptr;
    }

    //creates one free block covering [pAddress,pAddress+pSize) and inserts it in the tree
    //mPrevSize==mSize is consistent: the single block is both first and last, and nonzero marks it free
    void AddSegment(void *pAddress,tSize pSize)
    {
        SBlock *lBigBlock=new(pAddress) SBlock;
        lBigBlock->mSize=pSize;
        lBigBlock->mPrevSize=pSize;
        mHeader.mTree.InsertEqual(*lBigBlock);
    }
    
    //records pBlock's state in its successor: 0 means allocated, otherwise the free block's size
    void FixNext(SBlock *pBlock,bool pAllocated)
    {
        NextBlock(pBlock)->mPrevSize=pAllocated?0:pBlock->mSize;
    }

private:
    tSize mSegmentSize; //total size of the segment given at construction
    SHeader mHeader;
};

/*namespace buola*/ }

#endif
