//go:build v4

package queue

import (
	"context"
	"sync"
	"sync/atomic"
	"unsafe"
)

// Implemented as a ring buffer.

// ConcurrentBlockingQueue is a fixed-capacity FIFO queue safe for
// concurrent use. Producers block while the queue is full and consumers
// block while it is empty, with context-based cancellation via the
// custom Cond below.
type ConcurrentBlockingQueue[T any] struct {
	data         []T         // ring buffer storage, len(data) == maxSize
	mutex        *sync.Mutex // guards all fields below; shared with both conds
	notFullCond  *Cond       // broadcast when an element is removed
	notEmptyCond *Cond       // broadcast when an element is added
	maxSize      int         // capacity of the ring buffer

	count int // current number of elements
	head  int // index of the next element to dequeue
	tail  int // index of the next free slot to enqueue into

	zero T // zero value of T, used to clear dequeued slots for GC
}

// NewConcurrentBlockingQueue builds a blocking queue backed by a
// pre-allocated ring buffer of maxsize elements. Both condition
// variables share the queue's mutex so waiters and mutators serialize
// on the same lock.
func NewConcurrentBlockingQueue[T any](maxsize int) *ConcurrentBlockingQueue[T] {
	mu := &sync.Mutex{}
	// Allocating the whole buffer up front is a trade-off: if you do not
	// want to commit all the memory immediately, a linked list would
	// avoid the one-shot allocation.
	q := &ConcurrentBlockingQueue[T]{
		data:    make([]T, maxsize),
		mutex:   mu,
		maxSize: maxsize,
	}
	q.notFullCond = NewCond(mu)
	q.notEmptyCond = NewCond(mu)
	return q
}

// EnQueue appends data at the tail of the queue, blocking while the
// queue is full. It returns ctx.Err() if the context is cancelled or
// its deadline expires before space becomes available.
func (c *ConcurrentBlockingQueue[T]) EnQueue(ctx context.Context, data T) error {
	if ctx.Err() != nil {
		return ctx.Err()
	}
	c.mutex.Lock()

	if ctx.Err() != nil {
		// BUG FIX: the original returned here while still holding the
		// mutex, deadlocking every other caller of the queue.
		c.mutex.Unlock()
		return ctx.Err()
	}

	// A for-loop (not if) is required: after being woken we may find the
	// queue full again because another producer won the race.
	for c.isFull() {
		err := c.notFullCond.WaitWithTimeout(ctx)
		if err != nil {
			// WaitWithTimeout re-acquires the lock before returning on
			// both paths, so the error path must release it.
			// BUG FIX: the original leaked the mutex here on timeout.
			c.mutex.Unlock()
			return err
		}
	}
	c.data[c.tail] = data
	c.tail++
	c.count++
	// Wrap the tail index around the ring buffer.
	if c.tail == c.maxSize {
		c.tail = 0
	}
	c.notEmptyCond.Broadcast()

	c.mutex.Unlock()
	return nil
}
// DeQueue removes and returns the element at the head of the queue,
// blocking while the queue is empty. On cancellation or timeout it
// returns the zero value of T and ctx.Err().
func (c *ConcurrentBlockingQueue[T]) DeQueue(ctx context.Context) (T, error) {
	var t T
	if ctx.Err() != nil {
		return t, ctx.Err()
	}
	c.mutex.Lock()
	// A for-loop (not if) is required: after being woken the queue may
	// already have been drained by a competing consumer.
	for c.isEmpty() {
		err := c.notEmptyCond.WaitWithTimeout(ctx)
		if err != nil {
			// WaitWithTimeout re-acquires the lock before returning on
			// both paths, so the error path must release it.
			// BUG FIX: the original leaked the mutex here on timeout,
			// deadlocking all other queue users.
			c.mutex.Unlock()
			return t, err
		}
	}

	t = c.data[c.head]
	// Overwrite the slot with the zero value so the removed element does
	// not keep its referents alive for the GC.
	c.data[c.head] = c.zero
	c.head++
	// Wrap the head index around the ring buffer.
	if c.head == c.maxSize {
		c.head = 0
	}
	c.count--

	c.notFullCond.Broadcast()

	c.mutex.Unlock()
	return t, nil
}
// Len returns the number of elements currently in the queue.
// BUG FIX: the original read c.count without holding the mutex, which
// is a data race under the Go memory model (flagged by -race) since
// count is written by EnQueue/DeQueue while holding the lock.
func (c *ConcurrentBlockingQueue[T]) Len() uint64 {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return uint64(c.count)
}
// isEmpty reports whether the queue holds no elements.
// Callers must hold c.mutex.
func (c *ConcurrentBlockingQueue[T]) isEmpty() bool {
	return c.count == 0
}

// Re-locking a non-reentrant mutex from the same goroutine deadlocks,
// so a self-locking exported IsFull cannot be called from methods that
// already hold c.mutex:
//func (c *ConcurrentBlockingQueue[T]) IsFull() bool {
//	c.mutex.Lock()
//	defer c.mutex.Unlock()
//	return len(c.data) == c.maxSize
//}

// isFull reports whether the queue has reached its capacity.
// Callers must hold c.mutex.
func (c *ConcurrentBlockingQueue[T]) isFull() bool {
	return c.count == c.maxSize
}

// Two possible designs for a cancellable condition variable:
//  1. broadcast by closing a shared channel (used here)
//  2. forwarding signals to individual waiters
//
// Cond is a context-aware condition variable. Waiters subscribe to the
// current notification channel; Broadcast atomically swaps in a fresh
// channel and closes the old one, waking every subscriber at once.
type Cond struct {
	L sync.Locker
	n unsafe.Pointer // holds a *chan struct{}; read/swapped atomically
}

// NewCond returns a Cond bound to locker l, initialized with a fresh
// (unclosed) notification channel.
func NewCond(l sync.Locker) *Cond {
	ch := make(chan struct{})
	return &Cond{L: l, n: unsafe.Pointer(&ch)}
}

// Wait blocks until the next Broadcast call. Like sync.Cond.Wait, it
// releases the underlying locker while blocked and re-acquires it
// before returning.
func (c *Cond) Wait() {
	ch := c.NotifyChan()
	c.L.Unlock()
	<-ch
	c.L.Lock()
}

// WaitWithTimeout behaves like Wait but also returns early with
// ctx.Err() when ctx is cancelled or its deadline expires. On BOTH
// paths the locker is re-acquired before returning, so the caller
// always owns the lock afterwards and is responsible for releasing it.
func (c *Cond) WaitWithTimeout(ctx context.Context) error {
	ch := c.NotifyChan()
	c.L.Unlock()
	var err error
	select {
	case <-ch:
		// Woken by Broadcast; err stays nil.
	case <-ctx.Done():
		err = ctx.Err()
	}
	c.L.Lock()
	return err
}

// NotifyChan returns the channel that the next Broadcast call will
// close. The pointer is loaded atomically, so calling this without
// holding L is safe.
func (c *Cond) NotifyChan() <-chan struct{} {
	p := (*chan struct{})(atomic.LoadPointer(&c.n))
	return *p
}

// Broadcast wakes every goroutine currently waiting on this Cond. It
// atomically installs a fresh notification channel and closes the
// previous one; closing releases all waiters blocked on it at once.
func (c *Cond) Broadcast() {
	next := make(chan struct{})
	prev := atomic.SwapPointer(&c.n, unsafe.Pointer(&next))
	close(*(*chan struct{})(prev))
}
