#pragma once
#include "Common.hpp"
#include "CentralCache.hpp"

class ThreadCache
{
    public:
        // Refill the free list at `index` by fetching a batch of `size`-byte
        // objects from the central cache, using a slow-start feedback policy:
        // 1. Start with a small batch so we don't hoard memory we may not use.
        // 2. If this size class keeps being requested, the per-list upper
        //    bound (MaxSize) grows linearly, so batches get larger over time.
        // 3. Larger object sizes get a smaller batch cap (NumMoveSize),
        //    smaller sizes get a larger one.
        // Returns one object to the caller; any surplus objects from the
        // batch are parked in the free list for future allocations.
        void* FetchFromCentralCache(size_t index , size_t size)
        {
            // Batch size = min(static per-size cap, dynamic slow-start cap).
            size_t batchNum = std::min(SizeClass::NumMoveSize(size),_freeLists[index].MaxSize());
            if(batchNum == _freeLists[index].MaxSize())
            {
                // The dynamic cap was the limiter: grow it by one (slow start).
                // MaxSize() returns a mutable reference into the free list.
                _freeLists[index].MaxSize()++;
            }
            void* start = nullptr;
            void* end = nullptr;
            // The central cache may return fewer than batchNum objects,
            // but it guarantees at least one.
            size_t actualNum =  CentralCache::GetInstance()->FetchRangeObj(start,end,batchNum,size);
            assert(actualNum > 0);
            if(actualNum == 1)
            {
                assert(start == end);
                return start;
            }
            else 
            {
                // Hand the first object to the caller and stash the rest
                // [NextObj(start), end] in this thread's free list.
                _freeLists[index].PushRange(FreeList::NextObj(start),end);
                return start;
            }
        }
        // Allocate `size` bytes (size must not exceed MAX_BYTES; larger
        // requests are the responsibility of a higher layer). The request is
        // rounded up to the size-class alignment; the actual block returned
        // is alignSize bytes.
        void* Allocate(size_t size)
        {
            assert(size <= MAX_BYTES);
            size_t alignSize = SizeClass::RoundUp(size); // aligned block size
            size_t index = SizeClass::Index(size); // hash-bucket position
            if(!_freeLists[index].Empty())
            {
                // Fast path: the per-thread free list has a cached block.
                return _freeLists[index].Pop();
            }
            else 
            {
                // Slow path: refill this bucket from the central cache.
                return FetchFromCentralCache(index,alignSize);
            }
        }
        // Return a block of `size` bytes to this thread's cache. `size` must
        // be the same request size used at allocation so it maps to the same
        // bucket.
        void Deallocate(void* ptr , size_t size)
        {
            assert(ptr);
            // Fix: was `size < MAX_BYTES`, which rejected a legal
            // MAX_BYTES-sized block that Allocate (<=) had handed out.
            assert(size <= MAX_BYTES);
            size_t index = SizeClass::Index(size);
            _freeLists[index].Push(ptr);
        }
    private:
        FreeList _freeLists[NFREELISTS]; // one free list per size class
};
thread_local ThreadCache* pTLSThreadCache = nullptr;