package zanalytics

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"gitee.com/youkelike/ziam/zredis"
	"gitee.com/youkelike/zlog"
	msgpack "gopkg.in/vmihailenco/msgpack.v2"
)

// AnalyticsKeyName is the redis key that encoded analytics records are
// appended to (see recordWorker's AppendToSetPipelined calls).
const AnalyticsKeyName = "system-analytics"

// analytics is the package-level singleton set by NewAnalytics and read by
// GetAnalytics; it is nil until NewAnalytics has been called.
var analytics *Analytics

// Analytics fans analytics records out from producers (RecordHit) to a pool
// of consumer goroutines (recordWorker) that batch-write them to redis.
type Analytics struct {
	// store is the redis client used to persist encoded records.
	store zredis.AnalyticsHandler

	// poolSize is the number of consumer goroutines started by Start.
	poolSize int
	// recordsChan is the shared channel: producers send records into it and
	// all consumers receive from it concurrently. It is closed by Stop as
	// the shutdown signal.
	recordsChan chan *AnalyticsRecord
	// workerBufferSize is the capacity of each consumer's local staging
	// buffer; reaching it triggers a size-based flush to redis.
	workerBufferSize uint64
	// recordsBufferFlushInterval is the maximum time between flushes
	// (time-based flush via a ticker in recordWorker).
	recordsBufferFlushInterval uint64

	// shouldStop is an atomic flag: non-zero once Stop has been called, so
	// RecordHit becomes a no-op.
	shouldStop uint32
	// poolWg tracks the consumer goroutines so Stop can wait for them.
	poolWg sync.WaitGroup
}

// NewAnalytics builds the package-level Analytics instance from options and
// the given redis-backed store, assigns it to the package singleton, and
// returns it.
//
// The total channel capacity (options.RecordsBufferSize) is split evenly
// across options.PoolSize consumers to size each consumer's local buffer.
func NewAnalytics(options *AnalyticsOptions, store zredis.AnalyticsHandler) *Analytics {
	ps := options.PoolSize
	if ps < 1 {
		// Guard against a zero/negative pool size: it would panic with a
		// division by zero below and, in Start, launch no consumers at all.
		ps = 1
	}
	// Capacity of the channel shared by all consumers.
	rbs := options.RecordsBufferSize
	// Size of each consumer's local staging buffer.
	wbs := rbs / uint64(ps)
	if wbs < 1 {
		// When RecordsBufferSize < PoolSize the per-worker size rounds down
		// to 0; recordWorker's size-based flush compares len(buffer) (>= 1
		// after an append) against this value and would never fire, leaving
		// only the ticker to flush. Clamp to 1 so the size check works.
		wbs = 1
	}
	zlog.Debugw("analytics pool worker buffer size", zlog.Uint64("workerBufferSize", wbs))

	recordsChan := make(chan *AnalyticsRecord, rbs)
	analytics = &Analytics{
		store:                      store,
		poolSize:                   ps,
		recordsChan:                recordsChan,
		workerBufferSize:           wbs,
		recordsBufferFlushInterval: options.FlushInterval,
	}

	return analytics
}

// GetAnalytics returns the package-level Analytics instance created by
// NewAnalytics; callers get nil if NewAnalytics has not run yet.
func GetAnalytics() *Analytics {
	instance := analytics
	return instance
}

// 启动多个消费者
// Start connects the underlying store and launches poolSize consumer
// goroutines that drain recordsChan. Each consumer is registered with
// poolWg so Stop can wait for all of them to finish.
//
// NOTE(review): recordsChan is created once in NewAnalytics and closed by
// Stop; it is never recreated here, so restarting an instance after Stop
// would reuse a closed channel — confirm callers never restart.
func (a *Analytics) Start() {
	a.store.Connect()

	// Clear the stop flag; Store is the idiomatic call since the previous
	// value returned by Swap was discarded anyway.
	atomic.StoreUint32(&a.shouldStop, 0)
	for i := 0; i < a.poolSize; i++ {
		a.poolWg.Add(1)
		go a.recordWorker()
	}
}

// Stop shuts the pipeline down: it flags producers to stop, closes the
// shared channel so each consumer flushes its remaining buffer and exits,
// then waits for every consumer goroutine to finish.
func (a *Analytics) Stop() {
	// Flag first so subsequent RecordHit calls become no-ops. Store is the
	// idiomatic call since the previous value returned by Swap was unused.
	atomic.StoreUint32(&a.shouldStop, 1)

	// Closing the channel is the shutdown signal observed by recordWorker.
	close(a.recordsChan)

	a.poolWg.Wait()
}

// 生产者
// RecordHit queues one analytics record for asynchronous batched delivery
// to redis. It is a no-op (returning nil) once Stop has been called. The
// send blocks when the shared channel is full, which applies natural
// backpressure to producers.
func (a *Analytics) RecordHit(record *AnalyticsRecord) error {
	if atomic.LoadUint32(&a.shouldStop) > 0 {
		return nil
	}

	// Stop may close recordsChan between the flag check above and the send
	// below; recover from the resulting "send on closed channel" panic so a
	// racing shutdown drops the record instead of crashing the producer.
	defer func() {
		_ = recover()
	}()

	a.recordsChan <- record
	return nil
}

// 消费者
// recordWorker is one consumer goroutine. It drains recordsChan into a
// local staging buffer and flushes the buffer to redis either when it
// reaches workerBufferSize or when the flush ticker fires, whichever comes
// first. When recordsChan is closed (by Stop) it flushes whatever is left
// and exits, releasing its poolWg slot.
func (a *Analytics) recordWorker() {
	defer a.poolWg.Done()

	// NOTE(review): recordsBufferFlushInterval is used as raw nanoseconds
	// (bare time.Duration conversion of an integer) — confirm FlushInterval
	// is configured in nanoseconds; otherwise multiply by the intended unit
	// (e.g. time.Millisecond).
	ticker := time.NewTicker(time.Duration(a.recordsBufferFlushInterval))
	defer ticker.Stop()

	// Local staging buffer of msgpack-encoded records.
	recordsBuffer := make([][]byte, 0, a.workerBufferSize)

	for {
		var readyToSend bool
		select {
		case record, ok := <-a.recordsChan:
			// Channel closed: flush whatever is staged and exit.
			if !ok {
				if len(recordsBuffer) > 0 {
					a.store.AppendToSetPipelined(context.Background(), AnalyticsKeyName, recordsBuffer)
				}
				return
			}

			// msgpack is smaller and faster than JSON.
			encoded, err := msgpack.Marshal(record)
			if err != nil {
				// *w-suffixed log methods take key/value pairs, not printf
				// verbs; the original passed a dangling format argument and
				// misspelled "analytics".
				zlog.Errorw("error encoding analytics data", "err", err.Error())
				continue
			}
			// Stage the record locally instead of writing redis per record.
			recordsBuffer = append(recordsBuffer, encoded)

			// Size-based flush: the staging buffer is full.
			readyToSend = uint64(len(recordsBuffer)) == a.workerBufferSize

		case <-ticker.C:
			// Time-based flush: report even a partially filled buffer once
			// the interval elapses.
			readyToSend = true
		}

		if readyToSend && len(recordsBuffer) > 0 {
			a.store.AppendToSetPipelined(context.Background(), AnalyticsKeyName, recordsBuffer)
			recordsBuffer = recordsBuffer[:0]
		}
	}
}
