package objPool

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	// Fallback values used by NewObjPool when the caller passes
	// non-positive size / subListInitSize arguments.
	default_obj_pool_list_size          = 10
	default_obj_pool_sub_list_init_size = 10

	// Compile-time switch for the pull/push accounting trace in
	// Pull/Push; when false those branches are dead code and the
	// compiler eliminates them.
	OPEN_OBJPOOL_COUNT_TRACE = false
)

/*
ObjPool is a sharded object pool: objects are stored in `size`
independent sub-lists, each guarded by its own mutex, and pull/push
walk the shards round-robin.

Rationale for the retention limit (translated from the original
comment): if many objects are pulled at roughly the same time and
pushed back shortly after, but are rarely used from then on, a large
number of objects would pile up in the pool, never released and never
reused. That scenario is extreme but possible, so the pool keeps only
a bounded number of objects per shard; anything beyond the threshold
is dropped for the GC to reclaim.
*/
type ObjPool struct {
	name         string
	list         [][]interface{} // one sub-list (shard) of pooled objects per slot
	tailIdxes    []int           // index of the last stored object in each shard; -1 means empty
	mutexes      []sync.Mutex    // one lock per shard
	newObjFunc   func() interface{}    // factory used by Pull when a shard is empty
	clearObjFunc func(obj interface{}) // optional; run on every object handed to Push

	storeSubListLimit int32 // max objects retained per shard; accessed atomically

	// To spread objects evenly across shards without paying for rand,
	// pull/push simply advance these round-robin cursors (translated
	// from the original comment).
	pullIdx      int
	pullIdxMutex sync.Mutex
	pushIdx      int
	pushIdxMutex sync.Mutex
}

// NewObjPool creates a pool named name with `size` shards, each
// pre-allocated to hold subListInitSize objects. Non-positive size or
// subListInitSize values fall back to the package defaults. newObjFunc
// must be non-nil (Pull uses it when a shard is empty); a nil value
// makes the function panic.
func NewObjPool(name string, size int, subListInitSize int, newObjFunc func() interface{}) *ObjPool {
	if size <= 0 {
		size = default_obj_pool_list_size
	}
	if subListInitSize <= 0 {
		subListInitSize = default_obj_pool_sub_list_init_size
	}
	if newObjFunc == nil {
		panic(fmt.Errorf("%s; newObjFunc is nil", name))
	}

	pool := &ObjPool{
		name:       name,
		list:       make([][]interface{}, size),
		tailIdxes:  make([]int, size),
		mutexes:    make([]sync.Mutex, size),
		newObjFunc: newObjFunc,
		// default 10*10*(2^5)=3200, max 5 times normal capacity double extend
		storeSubListLimit: int32(size * subListInitSize * 32),
	}

	for shard := range pool.list {
		pool.list[shard] = make([]interface{}, subListInitSize)
		pool.tailIdxes[shard] = -1 // mark shard as empty
	}

	return pool
}

// NewObjPoolWithClear behaves exactly like NewObjPool but additionally
// installs clearObjFunc, which Push runs on every returned object
// before storing (or dropping) it.
func NewObjPoolWithClear(name string, size int, subListInitSize int, newObjFunc func() interface{}, clearObjFunc func(obj interface{})) *ObjPool {
	pool := NewObjPool(name, size, subListInitSize, newObjFunc)
	pool.clearObjFunc = clearObjFunc
	return pool
}

// SetLimitedStore derives the per-shard retention limit from a total
// object-count budget: the budget is divided evenly across the shards
// and then clamped to at least default_obj_pool_sub_list_init_size.
//
// NOTE(review): the original comment said "set 0 to close, > 0 is
// open", but passing 0 (or any value below shards*10) does NOT disable
// limiting — it clamps the per-shard limit to the minimum of
// default_obj_pool_sub_list_init_size. Confirm the intended semantics.
func (self *ObjPool) SetLimitedStore(storeCntLimit int32) {
	storeSubListLimit := storeCntLimit / int32(len(self.list))
	if storeSubListLimit < default_obj_pool_sub_list_init_size {
		storeSubListLimit = default_obj_pool_sub_list_init_size
	}
	// stored atomically because Push reads it without holding a shard lock
	atomic.StoreInt32(&self.storeSubListLimit, storeSubListLimit)
}

// nextPullIdx advances the round-robin pull cursor and returns the
// shard index the next Pull should draw from.
func (self *ObjPool) nextPullIdx() int {
	self.pullIdxMutex.Lock()
	defer self.pullIdxMutex.Unlock()

	idx := (self.pullIdx + 1) % len(self.list)
	self.pullIdx = idx
	return idx
}

// nextPushIdx advances the round-robin push cursor and returns the
// shard index the next Push should store into.
func (self *ObjPool) nextPushIdx() int {
	self.pushIdxMutex.Lock()
	defer self.pushIdxMutex.Unlock()

	idx := (self.pushIdx + 1) % len(self.list)
	self.pushIdx = idx
	return idx
}

// GetCurObjCnt reports how many objects are currently stored across
// all shards. Shards are locked one at a time, so with concurrent
// pulls/pushes the result is only an approximate snapshot.
func (self *ObjPool) GetCurObjCnt() (rlt int) {
	for i := range self.mutexes {
		self.mutexes[i].Lock()
		rlt += self.tailIdxes[i] + 1 // tailIdx is -1 when the shard is empty
		self.mutexes[i].Unlock()
	}
	return
}

// Debug/trace state shared by Pull and Push when
// OPEN_OBJPOOL_COUNT_TRACE is enabled. All of it is guarded by
// g_testMutex.
var g_testMutex = sync.Mutex{}
var g_testPullCnt = int64(0)          // total Pull calls across all pools
var g_testPushCnt = int64(0)          // total Push calls across all pools
var g_testDeltaCnt = map[string]int{} // per-pool count of pulled-but-not-yet-pushed objects
var g_testLastChanged = false         // whether pull/push totals differed at the previous Push
var g_testObjCnt = map[string]int{}   // per-pool stored-object count sampled at Push time
var g_testLogN = int64(0)             // Push counter used to throttle trace output

// emit at most one trace callback every g_testOutputStep pushes
const g_testOutputStep = 10000

// Pull takes an object out of the pool, visiting shards round-robin.
// When the selected shard is empty a fresh object is created via
// newObjFunc instead.
func (self *ObjPool) Pull() (obj interface{}) {
	if OPEN_OBJPOOL_COUNT_TRACE {
		func() {
			g_testMutex.Lock()
			defer g_testMutex.Unlock()
			g_testDeltaCnt[self.name]++
			g_testPullCnt++
		}()
	}

	shard := self.nextPullIdx()

	lock := &self.mutexes[shard]
	lock.Lock()
	defer lock.Unlock()

	tail := self.tailIdxes[shard]
	if tail < 0 {
		// shard empty: hand out a brand-new object
		return self.newObjFunc()
	}

	obj = self.list[shard][tail]
	self.list[shard][tail] = nil // clear the slot so it doesn't pin the object
	self.tailIdxes[shard] = tail - 1
	return obj
}

// Test_ObjPool_OnPushGetDebugInfos, when non-nil and
// OPEN_OBJPOOL_COUNT_TRACE is enabled, periodically receives formatted
// snapshots of the trace counters from Push. Strings are passed (rather
// than the maps) so the callback runs without g_testMutex held.
var Test_ObjPool_OnPushGetDebugInfos func(leftPullCnts string, storedObjCnts string)

// Push returns obj to the pool. clearObjFunc (if installed) is run
// first. The target shard is chosen round-robin; when the shard is
// already at the retention limit the object is silently dropped so the
// GC can reclaim it.
func (self *ObjPool) Push(obj interface{}) {
	if self.clearObjFunc != nil {
		self.clearObjFunc(obj)
	}

	if OPEN_OBJPOOL_COUNT_TRACE {
		// To avoid deadlock from nesting external locks inside
		// g_testMutex, pre-formatted strings are handed to the callback
		// after the mutex is released (translated from the original
		// comment).
		var leftPullCntsInfo string
		var storedObjCntsInfo string
		sendLog := false
		func() {
			g_testMutex.Lock()
			defer g_testMutex.Unlock()
			g_testObjCnt[self.name] = self.GetCurObjCnt()
			g_testDeltaCnt[self.name]--
			g_testPushCnt++
			curChanged := g_testPullCnt != g_testPushCnt
			g_testLogN++
			sendLog = g_testLogN%g_testOutputStep == 0
			// push/pull are called very often, so we emit at most one
			// log every g_testOutputStep (10000) pushes
			if sendLog && (g_testLastChanged != curChanged || curChanged) {
				if Test_ObjPool_OnPushGetDebugInfos != nil {
					leftPullCntsInfo = fmt.Sprintf("%v", g_testDeltaCnt)
					storedObjCntsInfo = fmt.Sprintf("%v", g_testObjCnt)
				}
				//log.Info("", log.Trace, "leftPullCnt: %v;\nobjCnt:%v", g_testDeltaCnt, g_testObjCnt)
			}
			g_testLastChanged = curChanged
		}()

		if sendLog && Test_ObjPool_OnPushGetDebugInfos != nil {
			Test_ObjPool_OnPushGetDebugInfos(leftPullCntsInfo, storedObjCntsInfo)
		}
	}

	idx := self.nextPushIdx()

	mutex := &self.mutexes[idx]
	mutex.Lock()
	defer mutex.Unlock()

	// tentatively claim the next slot; reverted below if the shard is full
	self.tailIdxes[idx]++

	tailIdx := self.tailIdxes[idx]

	subList := self.list[idx]
	subListLen := len(subList)
	if tailIdx >= subListLen {
		// limit is read atomically because SetLimitedStore can change it
		// concurrently without taking the shard lock
		storeSubListLimit := int(atomic.LoadInt32(&self.storeSubListLimit))
		if subListLen > storeSubListLimit {
			// limit was lowered below the current shard size: shrink the
			// shard to the new limit, keeping the first storeSubListLimit
			// objects; the rest (and obj itself) are left to the GC
			newSubList := make([]interface{}, storeSubListLimit)
			for i := 0; i < storeSubListLimit; i++ {
				newSubList[i] = subList[i]
			}
			self.list[idx] = newSubList
			self.tailIdxes[idx] = storeSubListLimit - 1
			// just ignore obj to directly release it
			//fmt.Printf("subListLen %d over limit, remake list, ignore obj\n", subListLen)
		} else if subListLen == storeSubListLimit {
			self.tailIdxes[idx]-- // revert ++ op
			// just ignore obj to directly release it
			//fmt.Printf("subListLen %d arrives limit, ignore obj\n", subListLen)
		} else {
			// room to grow: append obj, then re-extend the slice to its full
			// capacity so later pushes can assign slots by index directly
			//oldLen := len(subList)
			subList = append(subList, obj)
			self.list[idx] = subList[:cap(subList)]
			//newLen := len(self.list[idx])
			//fmt.Printf("oldlen %d -> newlen %d\n", oldLen, newLen)
		}
	} else {
		subList[tailIdx] = obj
	}
}
