package kqutil

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"github.com/segmentio/kafka-go"
)

// QueueConumer consumes Kafka messages through a fixed-capacity ring
// buffer: a single writer goroutine pulls messages in, worker goroutines
// claim batches via CAS on readIndex and mark slots done, and a committer
// goroutine advances commitOffset over the finished prefix.
// NOTE(review): the name looks like a typo of "QueueConsumer"; kept as-is
// because it is exported and callers may depend on it.
type QueueConumer struct {
	batchSize      int32         // number of buffered messages that triggers a batch dispatch
	batchInterval  time.Duration // max wait before a partial batch is flushed (multiplied by time.Second in waitBatch)
	commitInterval time.Duration // period of the committer's ticker

	capacity   int32
	tail       int32 // full-buffer sentinel; must only be used by the writer goroutine
	queue      []*kafka.Message
	readyQueue []bool // marks whether the message in each slot has been processed successfully

	commitOffset atomic.Int32 // next slot the committer will inspect
	readIndex    atomic.Int32 // next slot a worker will claim
	writeIndex   atomic.Int32 // next slot the writer will fill
	isFull       atomic.Bool

	rwMu         sync.RWMutex  // guards readyQueue between the committer (Lock) and workers (RLock)
	waitNoFullCh chan struct{} // signals the buffer is no longer full
	waitFullCh   chan struct{} // signals the buffer has become full
	waitReadyCh  chan struct{} // signals newly finished messages
	waitBatchCh  chan struct{} // signals a full batch is available to dispatch
}

// NewQueueConumer builds a consumer whose ring buffer holds capacity
// messages; one extra slot is allocated to serve as the full-buffer
// sentinel (tail starts one ahead of writeIndex). Every notification
// channel is buffered with size 1 so a pending signal is never lost and
// duplicate signals are never queued.
func NewQueueConumer(capacity int32) *QueueConumer {
	return &QueueConumer{
		batchSize: 100,
		// batchInterval is stored as a bare count of seconds and is
		// multiplied by time.Second at its use site (waitBatch); do not
		// turn it into a real duration without updating that site.
		batchInterval: 1,
		// commitInterval is passed directly to time.NewTicker in
		// commiter, so it must be a real duration. The previous literal
		// 5 meant five *nanoseconds*, which made the committer tick (and
		// take the write lock) in a busy loop.
		commitInterval: 5 * time.Second,

		capacity:   capacity,
		tail:       1,
		queue:      make([]*kafka.Message, capacity+1),
		readyQueue: make([]bool, capacity+1),

		waitNoFullCh: make(chan struct{}, 1),
		waitFullCh:   make(chan struct{}, 1),
		waitReadyCh:  make(chan struct{}, 1),
		waitBatchCh:  make(chan struct{}, 1),
	}
}

// writer is the single producer goroutine: it pulls one Kafka message at
// a time into the ring buffer and advances writeIndex/tail. Exactly one
// goroutine may run this loop — tail is unsynchronized and writeIndex is
// stored (not CAS'd) on the assumption of a single writer.
func (c *QueueConumer) writer() {
	for {
		// Block while the buffer is full (committed offset == tail).
		c.waitNoFull()

		// TODO: pull the next message from Kafka.
		var msg kafka.Message

		// Invariant maintained by this loop: tail == writeIndex+1 (mod
		// len(queue)), so storing tail into writeIndex advances it by
		// exactly one slot.
		writeIndex := c.writeIndex.Load()
		c.queue[writeIndex] = &msg
		c.writeIndex.Store(c.tail)
		c.tail = (c.tail + 1) % int32(len(c.queue))

		// Wake a worker once a full batch has accumulated.
		// NOTE(review): this uses the pre-store writeIndex, so the
		// signal fires one message after the backlog actually reaches
		// batchSize; workers also flush on timeout, so this only delays
		// dispatch slightly — confirm whether that is intended.
		readIndex, qLen := c.readIndex.Load(), int32(len(c.queue))
		if (qLen+writeIndex-readIndex)%qLen >= c.batchSize {
			c.notifyBatch()
		}
	}
}

// waitNoFull blocks the writer goroutine until the ring buffer has a
// free slot again, i.e. the committed offset no longer equals the tail
// sentinel. While full it raises the isFull flag, pokes the committer
// via notifyFull, and sleeps until the committer signals waitNoFullCh;
// the condition is re-checked after every wakeup.
func (c *QueueConumer) waitNoFull() {
	for {
		if c.commitOffset.Load() != c.tail {
			return
		}
		c.isFull.Store(true)
		c.notifyFull()
		<-c.waitNoFullCh
	}
}

// notifyFull sends a non-blocking signal on waitFullCh to tell the
// committer the buffer is full; because the channel has capacity 1, an
// already-pending signal makes the new one a no-op.
func (c *QueueConumer) notifyFull() {
	select {
	case c.waitFullCh <- struct{}{}:
	default:
	}
}

// notifyBatch sends a non-blocking signal on waitBatchCh to wake a
// worker blocked in waitBatch; a signal already pending in the
// 1-capacity channel makes this a no-op.
func (c *QueueConumer) notifyBatch() {
	select {
	case c.waitBatchCh <- struct{}{}:
	default:
	}
}

// worker claims a batch of slots, processes them, and marks each slot
// ready so the committer can advance past it. Multiple workers may run
// concurrently: waitBatch hands out disjoint [start, end) ring ranges
// via CAS on readIndex, and the readyQueue writes happen under the read
// lock so they never interleave with the committer's scan-and-clear,
// which holds the write lock.
func (c *QueueConumer) worker() {
	for {
		start, end := c.waitBatch()
		if start == end {
			continue // flush timeout fired with nothing buffered
		}

		// TODO: process the claimed messages.

		qLen := int32(len(c.queue))
		c.rwMu.RLock()
		for i := start; i != end; i = (i + 1) % qLen {
			c.readyQueue[i] = true
		}
		c.rwMu.RUnlock()

		c.notifyReady()
	}
}

// waitBatch blocks until this worker owns a batch, returning the
// half-open ring range [start, end). A full batch (batchSize slots) is
// claimed as soon as it is available; otherwise, once batchInterval
// (interpreted as seconds — see the note below) elapses, whatever is
// buffered is claimed instead, which may be the empty range.
func (c *QueueConumer) waitBatch() (int32, int32) {
	// NOTE(review): batchInterval already has type time.Duration, so
	// multiplying by time.Second only works because the constructor
	// stores a bare count in it — fragile, but the two sites agree.
	ctx, cancel := context.WithTimeout(context.TODO(), c.batchInterval*time.Second)
	defer cancel()
	for {
		writeIndex, readIndex, qLen := c.writeIndex.Load(), c.readIndex.Load(), int32(len(c.queue))
		if (qLen+writeIndex-readIndex)%qLen < c.batchSize {
			// Not enough buffered for a full batch: wait for the
			// writer's batch signal or the flush timeout.
			select {
			case <-c.waitBatchCh:
				continue
			case <-ctx.Done():
				// Timeout: try to claim everything buffered so far. If
				// the CAS fails another worker moved readIndex; fall
				// through and re-evaluate with fresh indices (the
				// expired ctx makes this branch fire immediately again).
				newIndex := c.writeIndex.Load()
				if c.readIndex.CompareAndSwap(readIndex, newIndex) {
					return readIndex, newIndex
				}
			}
		} else {
			// A full batch is available: claim exactly batchSize slots.
			newIndex := (readIndex + c.batchSize) % qLen
			if c.readIndex.CompareAndSwap(readIndex, newIndex) {
				return readIndex, newIndex
			}
		}
	}
}

// notifyReady sends a non-blocking signal on waitReadyCh to tell the
// committer that newly processed messages exist; a signal already
// pending in the 1-capacity channel makes this a no-op.
func (c *QueueConumer) notifyReady() {
	select {
	case c.waitReadyCh <- struct{}{}:
	default:
	}
}

// commiter drives offset commits. It commits on a fixed ticker, and
// additionally commits immediately while the buffer is full — either
// because a worker just finished messages (waitReadyCh) or the writer
// reported fullness (waitFullCh) — so the blocked writer is released as
// soon as possible. NOTE(review): name looks like a typo of
// "committer"; kept as-is in case other files reference it.
func (c *QueueConumer) commiter() {
	ticker := time.NewTicker(c.commitInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			c.commit()
		case <-c.waitReadyCh:
			// Fast-path commit only under backpressure; otherwise the
			// ticker batches commits.
			if c.isFull.Load() {
				c.commit()
			}
		case <-c.waitFullCh:
			if c.isFull.Load() {
				c.commit()
			}
		}
	}
}

// commit advances commitOffset across the contiguous run of ready slots
// starting at the current offset, clearing each ready flag as it goes.
// Holding the write lock excludes workers from flipping flags mid-scan.
// If any progress was made the buffer cannot still be full, so the
// isFull flag is dropped and the writer is woken.
func (c *QueueConumer) commit() {
	c.rwMu.Lock()
	defer c.rwMu.Unlock()

	index, qLen, count := c.commitOffset.Load(), int32(len(c.queue)), 0
	for c.readyQueue[index] {
		c.readyQueue[index] = false
		// TODO: collect the partition offset of this message.
		count++
		index = (index + 1) % qLen
	}
	c.commitOffset.Store(index)
	// TODO: commit the collected partition offsets to Kafka.
	if count > 0 {
		c.isFull.Store(false)
		c.notifyNoFull()
	}
}

// notifyNoFull sends a non-blocking signal on waitNoFullCh to release a
// writer blocked in waitNoFull; a signal already pending in the
// 1-capacity channel makes this a no-op.
func (c *QueueConumer) notifyNoFull() {
	select {
	case c.waitNoFullCh <- struct{}{}:
	default:
	}
}
