#include"ThreadCache.h"
#include"CentralCache.h"

// 申请内存对象
// Allocate one object of `size` bytes from this thread's local cache.
// Sizes above MAX_BYTES must be handled elsewhere (asserted here).
void* ThreadCache::Allocate(size_t size)
{
    assert(size <= MAX_BYTES);

    const size_t index = SizeClass::Index(size);       // which free-list bucket
    const size_t alignSize = SizeClass::RoundUp(size); // rounded-up object size

    // Slow path: the bucket is empty, so refill it from the central cache.
    if (_freeLists[index].Empty())
    {
        return FetchFromCentralCache(index, alignSize);
    }

    // Fast path: pop an object straight off the thread-local free list.
    return _freeLists[index].Pop();
}


// 从central cache中获取内存对象
void* ThreadCache::FetchFromCentralCache(size_t index, size_t size)
{
    // 慢开始反馈调节算法
    // 1. 最开始不会一次批量申请太多个对象, 可能用不完浪费
    // 2. 如果不断有size大小的内存需求, 那么batchNum将会不断变大直至上限
    // 3. size越大, 一次批量申请的内存对象就越少
    // 4. size越小, 一次批量申请的内存对象就越多
    size_t batchNum = std::min(SizeClass::NumMoveSize(size), _freeLists[index].MaxSize());
    if(batchNum == _freeLists[index].MaxSize())
    {
        _freeLists[index].MaxSize() += 1;
    }

    void* start = nullptr;
    void* end = nullptr;
    size_t actualNum = CentralCache::GetInstance()->FetchRangeObj(start, end, batchNum, size);
    assert(actualNum > 0);

    if(actualNum == 1)
    {
        assert(start == end);
        return start;
    }
    else
    {
        _freeLists[index].PushRange(NextObj(start), end, actualNum - 1);
        return start;
    }
}


// 释放内存对象
void ThreadCache::Deallocate(void* ptr, size_t size)
{
    assert(ptr);
    assert(size <= MAX_BYTES);

    size_t index = SizeClass::Index(size);
    _freeLists[index].Push(ptr);

    if(_freeLists[index].Size() >= _freeLists[index].MaxSize())
    {
        ListTooLong(_freeLists[index], size);
    }
}


// 释放对象链表过长时, 回收一段内存对象到central cache中
// The free list has grown too long: detach its entire contents and release
// the run of objects back to the central cache's spans.
void ThreadCache::ListTooLong(FreeList& list, size_t size)
{
    void* first = nullptr;
    void* last = nullptr;
    list.PopRange(first, last, list.Size());

    // Return the detached singly-linked run to the spans it came from.
    CentralCache::GetInstance()->ReleaseListToSpans(first, size);
}