// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: wsfuyibing <682805@qq.com>
// Date: 2024-11-19

package sr

import (
	"gitee.com/go-libs/log"
	"gitee.com/go-libs/runtime"
	"gitee.com/gomq/sr/errs"
	"sync"
	"sync/atomic"
	"time"
)

const (
	// defaultBucketBatch is the default number of items popped and
	// handed to the handler in a single call.
	defaultBucketBatch       = 300
	// defaultBucketCapacity is the default maximum number of items
	// held in memory before Add rejects new batches.
	defaultBucketCapacity    = 9000
	// defaultBucketConcurrency is the default maximum number of pop
	// goroutines invoking the handler in parallel.
	defaultBucketConcurrency = 30
)

type (
	// Bucket is a component to store middle objects then call in
	// goroutines with async and batch.
	Bucket[T any] interface {
		// Add an item list to bucket and invoke callback handler in
		// goroutine with batch mode.
		Add(list ...T) (err error)

		// Count returns an amount of items that waiting to invoke handle
		// in memory.
		Count() (count int)

		// GetBatch returns the batch size for each pop.
		GetBatch() (batch int)

		// GetCapacity returns the max amount of items that can be stored in.
		GetCapacity() (capacity int)

		// GetConcurrency returns the concurrency amount for parallel to
		// invoke handler.
		GetConcurrency() (concurrency int32)

		// Name returns the bucket name.
		Name() string

		// SetBatch sets the batch size for each pop.
		SetBatch(batch int)

		// SetCapacity sets the max amount of items that can be stored in.
		SetCapacity(capacity int)

		// SetConcurrency sets the concurrency amount for invoke handlers
		// with parallel.
		SetConcurrency(concurrency int32)

		// Wait block the goroutine until all items done and items in
		// bucket is empty.
		Wait()
	}

	// bucket is the default Bucket implementation: a mutex-guarded
	// in-memory slice drained by a bounded set of pop goroutines.
	bucket[T any] struct {
		// batch: max items per handler call; capacity: max items kept
		// in memory; total: cached len(list), maintained under mu.
		batch, capacity, total  int
		// concurrency: max parallel pops (guarded by mu);
		// processing: pops currently running (updated atomically).
		concurrency, processing int32
		// handler receives each popped batch of items.
		handler                 func([]T)
		// list holds pending items; guarded by mu.
		list                    []T
		mu                      *sync.Mutex
		// name identifies the bucket; set once at construction.
		name                    string
	}
)

// NewBucket creates a bucket instance that stores items under the given
// name and dispatches them in batches to the callable handler.
func NewBucket[T any](name string, handler func([]T)) Bucket[T] {
	b := &bucket[T]{
		name:    name,
		handler: handler,
		list:    make([]T, 0),
		mu:      &sync.Mutex{},
	}

	// Apply package-level defaults; callers may override them later
	// through the Set* methods.
	b.batch = defaultBucketBatch
	b.capacity = defaultBucketCapacity
	b.concurrency = defaultBucketConcurrency
	return b
}

// Add appends the given items to the bucket and schedules an asynchronous
// pop that invokes the handler in batch mode.
//
// When accepting the items would exceed the configured capacity, the whole
// batch is discarded (counted as discarded) and errs.ErrBucketOutOfMemory
// is returned.
func (o *bucket[T]) Add(list ...T) (err error) {
	n := len(list)

	// Return if the number of parameters is 0.
	if n == 0 {
		return
	}

	// Lock state when starts.
	o.mu.Lock()

	// Counting dynamic quantities to avoid memory overflow.
	//
	// Use ">" rather than ">=": with ">=" the bucket could never hold
	// exactly `capacity` items, contradicting the documented contract of
	// GetCapacity/SetCapacity ("max amount of items that can be stored").
	if (o.total + n) > o.capacity {
		o.mu.Unlock()
		runtime.GetCounter().MqCounter().IncrDiscardedCount(int64(n))
		err = errs.ErrBucketOutOfMemory
		return
	}

	// Update memory state and refresh the cached total under the lock.
	o.list = append(o.list, list...)
	o.total = len(o.list)
	o.mu.Unlock()
	runtime.GetCounter().MqCounter().IncrPushCount(int64(n))

	// Pop item immediately to call handler; pop itself enforces the
	// concurrency limit, so spawning here is safe even under bursts.
	go o.pop()
	return
}

// Count returns the number of items currently buffered in memory and
// waiting to be handed to the handler.
func (o *bucket[T]) Count() int {
	o.mu.Lock()
	n := len(o.list)
	o.mu.Unlock()
	return n
}

// GetBatch returns the batch size used for each pop.
func (o *bucket[T]) GetBatch() int {
	o.mu.Lock()
	defer o.mu.Unlock()
	return o.batch
}

// GetCapacity returns the maximum number of items the bucket may store.
func (o *bucket[T]) GetCapacity() int {
	o.mu.Lock()
	defer o.mu.Unlock()
	return o.capacity
}

// GetConcurrency returns the maximum number of handler invocations that
// may run in parallel.
func (o *bucket[T]) GetConcurrency() int32 {
	o.mu.Lock()
	defer o.mu.Unlock()
	return o.concurrency
}

// Name returns the bucket name. The name is set once at construction and
// never mutated afterwards, so no locking is needed here.
func (o *bucket[T]) Name() string {
	return o.name
}

// SetBatch sets the batch size used for each pop. Non-positive values
// are ignored.
func (o *bucket[T]) SetBatch(batch int) {
	if batch <= 0 {
		return
	}
	o.mu.Lock()
	defer o.mu.Unlock()
	o.batch = batch
}

// SetCapacity sets the maximum number of items the bucket may store.
// Non-positive values are ignored.
func (o *bucket[T]) SetCapacity(capacity int) {
	if capacity <= 0 {
		return
	}
	o.mu.Lock()
	defer o.mu.Unlock()
	o.capacity = capacity
}

// SetConcurrency sets the maximum number of handler invocations allowed
// to run in parallel. Non-positive values are ignored.
func (o *bucket[T]) SetConcurrency(concurrency int32) {
	if concurrency <= 0 {
		return
	}
	o.mu.Lock()
	defer o.mu.Unlock()
	o.concurrency = concurrency
}

// Wait blocks the calling goroutine until no pop is running and no item
// remains buffered. It polls the state every 10ms rather than using a
// notification channel.
func (o *bucket[T]) Wait() {
	const pollInterval = 10 * time.Millisecond

	for atomic.LoadInt32(&o.processing) != 0 || o.Count() != 0 {
		time.Sleep(pollInterval)
	}
}

// + Access

// pop drains up to one batch of items from the bucket and passes it to
// the handler. It enforces the concurrency limit via the atomic
// `processing` counter and reschedules itself while items remain.
func (o *bucket[T]) pop() {
	var (
		list  []T
		count int
	)

	// Concurrency limit.
	//
	// Read the limit through the locked getter: `concurrency` is written
	// under o.mu by SetConcurrency, so a bare `o.concurrency` read here
	// would be a data race.
	limit := o.GetConcurrency()
	if n := atomic.AddInt32(&o.processing, 1); n > limit {
		atomic.AddInt32(&o.processing, -1)
		return
	}

	// Clean when pop ends.
	defer func() {
		if r := recover(); r != nil {
			log.Fatalf(`[%s] popping caused a panic: %v`, o.name, r)
		}

		// Revert counter.
		atomic.AddInt32(&o.processing, -1)

		// Continue popping in a fresh goroutine: a direct recursive call
		// inside this defer would keep every outer frame alive and grow
		// the stack unboundedly on a busy bucket.
		if o.Count() > 0 {
			go o.pop()
		}
	}()

	// Lock when starts.
	o.mu.Lock()

	// Return if bucket is empty.
	if o.total = len(o.list); o.total == 0 {
		o.mu.Unlock()
		return
	}

	// Pop from the start of the list (FIFO order), at most one batch.
	if o.total > o.batch {
		count = o.batch
		list = o.list[0:count]
		o.list = o.list[count:]
	} else {
		count = o.total
		list = o.list[0:count]
		o.list = make([]T, 0)
	}
	o.mu.Unlock()
	runtime.GetCounter().MqCounter().IncrPopCount(int64(count))

	// Call handler outside the lock so slow handlers do not block Add.
	o.handler(list)
}
