package bufferPool

import (
	"bytes"
	"fmt"
	"gitee.com/kingzyt/common/activityChecker"
	bytes2 "gitee.com/kingzyt/common/bytes"
	"gitee.com/kingzyt/common/log"
	"sync"
	"time"
)

// When true, retained-buffer statistics could be logged periodically
// (see the commented-out call in subPoolCleanUp / getKeepBufInfo).
const OPEN_BUFFERPOOL_GET_KEEP_BUF_INFO = false

// Default sub-pool count: initially covers the first ~1k of buffer sizes (20*50).
const default_sub_pool_num = 50

// Default size step between adjacent sub-pools, in bytes.
// With a 20-byte step, allocations up to 2k need at most 100 sub-pools.
const default_sub_pool_interval_byte = 20

// How often each sub-pool's activity counter is decayed.
const default_sub_pool_decay_period = time.Second * 30

// Upper bound fed to each sub-pool's activity checker.
const default_sub_pool_max_activity = 10000

// BufferPool buckets reusable byte buffers into fixed-size sub-pools.
// Sub-pool i holds buffers whose backing array is exactly
// (i+1)*subPoolInterval-1 bytes (see getSubPoolBufElemSize).
type BufferPool struct {
	name             string
	pool             []*bytes2.BytesQueue                 // one buffer queue per sub-pool
	mutexes          []*sync.Mutex                        // one lock per sub-pool, same index as pool
	activityCheckers []*activityChecker.ActivityChecker   // per-sub-pool usage tracking, same index as pool
	rootMutex        sync.RWMutex                         // guards the three slices above against extendPool appends
	subPoolInterval  int                                  // byte step between adjacent sub-pool bucket sizes

	subPoolDecayPeriod time.Duration // interval between activity decay passes
	subPoolMaxActivity int           // cap passed to each ActivityChecker
}

// BufPoolInit builds a small tier of buffer pools with increasing size steps
// and returns pull/push closures that route each request to the matching tier.
func BufPoolInit(name string) (pull func(sizeHint int) *bytes2.Element, push func(bufElem *bytes2.Element)) {
	intervals := []int{20, 1000, 100000}

	pools := make([]*BufferPool, 0, len(intervals))
	for idx, step := range intervals {
		pools = append(pools, NewBufferPool(fmt.Sprintf("%s_%d", name, idx), 50, step, 0))
	}

	// selectPool picks the coarsest tier whose threshold the size exceeds,
	// falling back to the finest tier for small sizes.
	selectPool := func(size int) *BufferPool {
		for j := len(intervals) - 1; j >= 0; j-- {
			if size > intervals[j] {
				return pools[j]
			}
		}
		return pools[0]
	}

	pull = func(sizeHint int) *bytes2.Element {
		return selectPool(sizeHint).Pull(sizeHint)
	}
	/* Must key on len(bufElem.Value), never cap: a buffer has to go back to the
	pool it was pulled from. Within one pool the underlying bucket size of a
	sub-pool is constant, but it differs between pools; mixing pools up could
	later make Pull panic because the actual slice cap is too small. */
	push = func(bufElem *bytes2.Element) {
		selectPool(len(bufElem.Value)).Push(bufElem)
	}

	return
}

// GlobalBufPull and GlobalBufPush are the process-wide shared pull/push entry points.
var GlobalBufPull, GlobalBufPush = BufPoolInit("globalBufPool")

/*
NewBufferPool builds a pool named name with subPoolNum sub-pools whose bucket
sizes step by subPoolInterval bytes; each sub-pool is pre-filled with
bufInitCount buffers. Non-positive subPoolNum/subPoolInterval and a negative
bufInitCount fall back to package defaults.

NOTE: total buffered memory is currently unbounded — at peak load the pool may
grow large while only a small part is used afterwards. Activity-based release
of idle buffers may be added later depending on real-world behavior.

It also starts the background cleanup goroutine, which runs for the lifetime
of the process (there is currently no way to stop it).
*/
func NewBufferPool(name string, subPoolNum int, subPoolInterval int, bufInitCount int) *BufferPool {
	if subPoolNum <= 0 {
		subPoolNum = default_sub_pool_num
	}
	if subPoolInterval <= 0 {
		subPoolInterval = default_sub_pool_interval_byte
	}
	if bufInitCount < 0 {
		bufInitCount = 0
	}
	rlt := &BufferPool{
		name:               name,
		pool:               make([]*bytes2.BytesQueue, subPoolNum),
		mutexes:            make([]*sync.Mutex, subPoolNum),
		activityCheckers:   make([]*activityChecker.ActivityChecker, subPoolNum),
		subPoolInterval:    subPoolInterval,
		subPoolDecayPeriod: default_sub_pool_decay_period,
		subPoolMaxActivity: default_sub_pool_max_activity,
	}

	for i := 0; i < subPoolNum; i++ {
		q := bytes2.NewBytesQueue()
		rlt.pool[i] = q
		bufElemSize := rlt.getSubPoolBufElemSize(i)
		// Pre-fill this sub-pool. `j`, not `i`: the original shadowed the
		// outer loop variable, which worked but invited bugs.
		for j := 0; j < bufInitCount; j++ {
			q.PushBack(&bytes2.Element{Value: make([]byte, bufElemSize)})
		}
		rlt.mutexes[i] = &sync.Mutex{}
		rlt.activityCheckers[i] = activityChecker.NewActivityChecker(fmt.Sprintf("%s_ac_%d", name, i), rlt.subPoolDecayPeriod, rlt.subPoolMaxActivity)
	}

	go rlt.subPoolCleanUp()

	return rlt
}

// subPoolCleanUp runs forever, decaying every sub-pool's activity counter once
// per decay period. The actual release of idle buffers is still a TODO.
func (self *BufferPool) subPoolCleanUp() {
	decayOne := func(m *sync.Mutex, ac *activityChecker.ActivityChecker) {
		m.Lock()
		defer m.Unlock()

		ac.DecayRough()

		// TODO: release idle buffers here based on the decayed activity.
	}

	ticks := time.NewTicker(self.subPoolDecayPeriod).C
	for range ticks {
		/*
			if common.OPEN_BUFFERPOOL_GET_KEEP_BUF_INFO {
				// module name set empty, avoid long string alloc again
				log.Record("", self.getKeepBufInfo())
			}
		*/
		func() {
			self.rootMutex.RLock()
			defer self.rootMutex.RUnlock()

			for idx := range self.pool {
				decayOne(self.mutexes[idx], self.activityCheckers[idx])
			}
		}()
	}
}

// extendPool grows the sub-pool slices until maxSubPoolIdx is a valid index.
func (self *BufferPool) extendPool(maxSubPoolIdx int) {
	self.rootMutex.Lock()
	defer self.rootMutex.Unlock()

	// Re-check under the write lock: another goroutine may already have grown it.
	if maxSubPoolIdx < len(self.pool) {
		return
	}

	log.Record(self.name, "pool extend from %d to %d", len(self.pool), maxSubPoolIdx+1)

	for len(self.pool) <= maxSubPoolIdx {
		// Capture the new sub-pool's index before any append changes len(self.pool).
		idx := len(self.pool)
		self.activityCheckers = append(self.activityCheckers, activityChecker.NewActivityChecker(fmt.Sprintf("%s_ac_%d", self.name, idx), default_sub_pool_decay_period, default_sub_pool_max_activity))
		self.pool = append(self.pool, bytes2.NewBytesQueue())
		self.mutexes = append(self.mutexes, &sync.Mutex{})
	}
}

// getSubPoolByIndex returns the queue, mutex and activity checker for
// subPoolIdx, growing the pool first when the index is out of range.
// Once created, a sub-pool's pointer and mutex never change, so callers may
// keep using them after rootMutex is released.
func (self *BufferPool) getSubPoolByIndex(subPoolIdx int) (*bytes2.BytesQueue, *sync.Mutex, *activityChecker.ActivityChecker) {
	// The length check must happen under the read lock: reading len(self.pool)
	// unsynchronized would race with the appends in extendPool.
	self.rootMutex.RLock()
	if subPoolIdx >= len(self.pool) {
		self.rootMutex.RUnlock()
		self.extendPool(subPoolIdx) // re-checks under the write lock
		self.rootMutex.RLock()
	}
	defer self.rootMutex.RUnlock()

	return self.pool[subPoolIdx], self.mutexes[subPoolIdx], self.activityCheckers[subPoolIdx]
}

// getSubPool maps sizeHint to its sub-pool and returns that sub-pool's queue,
// mutex and activity checker.
func (self *BufferPool) getSubPool(sizeHint int) (q *bytes2.BytesQueue, m *sync.Mutex, a *activityChecker.ActivityChecker) {
	idx := self.getSubPoolIdex(sizeHint)
	q, m, a = self.getSubPoolByIndex(idx)
	return q, m, a
}

// getSubPoolIdex maps a requested size to its sub-pool index via integer
// division by the interval. ("Idex" is a typo for "Index", kept so callers
// elsewhere keep compiling.)
func (self *BufferPool) getSubPoolIdex(sizeHint int) int {
	return sizeHint / self.subPoolInterval
}

/*
	getSubPoolBufElemSize returns the fixed backing-array size of buffers in
	sub-pool subPoolIdx. Example with an interval of 20:

	the allocated size for a request is (hintSize/20 + 1)*20 - 1, which is
	always strictly greater than hintSize

	pull: idx = hintSize/20, bufsize = (idx+1)*20-1 = idx*20+19
	push: targetSubPoolIdx = bufsize/20 = (idx*20+19)/20 = idx*20/20 + 19/20 = idx
	so the sub-pool a buffer is pulled from and pushed back to is the same
*/
func (self *BufferPool) getSubPoolBufElemSize(subPoolIdx int) int {
	return (subPoolIdx+1)*self.subPoolInterval - 1
}

/*
Pull returns a buffer element whose len is exactly sizeHint, reusing a pooled
buffer when one is available and allocating a bucket-sized one otherwise.

NOTE(review, translated): buffers that have been pulled out are in use;
activity-based reclamation (a TODO) would only ever free buffers still sitting
in the pool, which matches the goal of releasing long-idle memory.
*/
func (self *BufferPool) Pull(sizeHint int) *bytes2.Element {
	// `ac`, not `activityChecker`: the original shadowed the imported package name.
	subPool, mutex, ac := self.getSubPool(sizeHint)

	mutex.Lock()
	defer mutex.Unlock()

	ac.AccessRough(1)

	if subPool.Len() > 0 {
		bufElem := subPool.Remove(subPool.Front())
		// Reslice to the requested size; cap stays at the sub-pool bucket size,
		// which Push later uses to route the buffer back here.
		bufElem.Value = bufElem.Value[:sizeHint]
		return bufElem
	} else {
		// Backing array has the fixed size (idx+1)*subPoolInterval-1; hand back
		// a slice of the requested length over it.
		buf := make([]byte, self.getSubPoolBufElemSize(self.getSubPoolIdex(sizeHint)))
		return &bytes2.Element{Value: buf[:sizeHint]}
	}
}

// Push hands bufElem back to the pool for reuse.
// Callers must guarantee the buffer is no longer referenced after pushing:
// once the space is handed out to another caller, two owners would share the
// same backing array and interfere with each other, with undefined results.
func (self *BufferPool) Push(bufElem *bytes2.Element) {
	if bufElem == nil {
		panic(fmt.Errorf("BufferPool:%s, bufElem is nil", self.name))
	}
	if bufElem.Value == nil {
		panic(fmt.Errorf("BufferPool:%s, bufElem.Value is nil", self.name))
	}

	// The backing array's cap identifies the sub-pool this buffer belongs to.
	targetQueue, targetMutex, _ := self.getSubPool(cap(bufElem.Value))

	targetMutex.Lock()
	defer targetMutex.Unlock()

	targetQueue.PushBack(bufElem)
}

// getKeepBufInfo formats a one-line summary of how many buffers each sub-pool
// currently retains plus the estimated total bytes held. Any panic during
// formatting is swallowed; the caller then just gets the zero string.
func (self *BufferPool) getKeepBufInfo() string {
	defer func() {
		// Best-effort diagnostics only — never let a formatting panic escape.
		_ = recover()
	}()

	self.rootMutex.RLock()
	defer self.rootMutex.RUnlock()

	var sb bytes.Buffer

	fmt.Fprintf(&sb, "%s: subPoolCount:%d [", self.name, len(self.pool))

	totalBytes := 0
	for idx, q := range self.pool {
		// Len is read without the sub-pool mutex: it is just an int snapshot,
		// and a slightly stale count is harmless here.
		n := q.Len()
		if n > 0 {
			fmt.Fprintf(&sb, "%d:%d, ", idx, n)
		}

		totalBytes += n * (self.subPoolInterval*(idx+1) - 1)
	}

	fmt.Fprintf(&sb, "][totalBytes:%d]", totalBytes)

	return sb.String()
}
