#include"ThreadCache.h"
#include"Common.h"
#include"CentralCache.h"

// Fetch a batch of size-class `size` objects from the central cache.
// Returns one object to the caller; any surplus is stashed in the
// thread-local free list at `index`.
void* ThreadCache::FetchFromCentralCache(size_t index,size_t size)
{
    // Slow-start feedback algorithm:
    // - Don't ask the central cache for a full batch at first; a huge
    //   batch might go unused.
    // - Each time this size class is requested again, MaxSize() grows
    //   by one, so repeated demand ramps the batch size up toward the
    //   upper bound given by NumMoiveSize(size).
    // - Larger `size` => smaller batch; smaller `size` => larger batch.
    size_t batchnum = std::min(_freelists[index].MaxSize(), Sizeclass::NumMoiveSize(size));
    if(_freelists[index].MaxSize() == batchnum)
    {
        // The request was capped by MaxSize(), so demand exists:
        // grow the cap for next time. (This must happen before the
        // returns below, otherwise slow start never ramps up.)
        _freelists[index].MaxSize() += 1;
    }

    void* start = nullptr;
    void* end = nullptr;

    // The central cache returns [start, end], a singly linked range of
    // `actualnum` objects (it may hand back fewer than `batchnum`,
    // but always at least one).
    size_t actualnum = CentralCache::GetInstance()->FetchRangobj(start,end,batchnum,size);
    assert(actualnum > 0);

    if(actualnum == 1)
    {
        // A single object: range endpoints must coincide.
        // (Was `start = end` — an assignment, which corrupted `start`
        // and only fired when `end` was null.)
        assert(start == end);
        return start;
    }
    else
    {
        // Hand the first object to the caller; store the remaining
        // actualnum - 1 objects in the thread-local free list.
        _freelists[index].pushrange(ListNext(start),end,actualnum - 1);
        return start;
    }
}

// Allocate `size` bytes from the thread-local cache.
// The request is rounded up to its size-class alignment; if the
// matching free list has an object available it is popped directly,
// otherwise a batch is fetched from the central cache.
void* ThreadCache::Allocate(size_t size)
{
    assert(size <= MAX_BYTES);

    const size_t aligned = Sizeclass::Roundup(size);
    const size_t bucket  = Sizeclass::Index(size);

    // Fast path: reuse an object already cached in this thread.
    if(_freelists[bucket].Empty())
    {
        // Slow path: refill this bucket from the central cache.
        return FetchFromCentralCache(bucket,aligned);
    }
    return _freelists[bucket].pop();
}

// Return an object of `size` bytes to the thread-local free list.
// When the list grows past one full batch's worth, a range is handed
// back to the central cache so memory can migrate between threads.
void ThreadCache::Deallocate(void* ptr,size_t size)
{
    assert(ptr);
    assert(size <= MAX_BYTES);

    size_t index = Sizeclass::Index(size);
    _freelists[index].push(ptr);
    // (Was `return _freelists[index].push(ptr);` — the early return
    // made the give-back below unreachable, so memory was never
    // released to the central cache.)

    // Once the list holds at least one full batch (MaxSize), return a
    // batch-sized range to the Central Cache.
    if(_freelists[index].Size() >= _freelists[index].MaxSize())
    {
        ListTooLong(_freelists[index],size);
    }
}

// Detach one batch (MaxSize objects) from an over-long thread-local
// free list and release it back to the central cache's spans.
void ThreadCache::ListTooLong(FreeList& list,size_t size)
{
    void* first = nullptr;
    void* last  = nullptr;

    // Pop a batch-sized range [first, last] off the front of the list.
    list.poprange(first,last,list.MaxSize());

    // Hand the detached range back to the central cache.
    CentralCache::GetInstance()->ReleaseListToSpans(first,size);
}
