package analyti

import (
	"fmt"
	"github.com/vmihailenco/msgpack/v5"
	"sync"
	"sync/atomic"
	"time"
)

// analyticsKeyName is the store (Redis) set key under which encoded analytics records are appended.
const analyticsKeyName = "iam-system-analytics"

// analytics is the package-level singleton instance; it is set by NewAnalytics and read by GetAnalytics.
var analytics *Analytics

// GetAnalytics returns the package-level Analytics instance.
// It is nil until NewAnalytics has been called.
func GetAnalytics() *Analytics {
	instance := analytics

	return instance
}

// Analytics fans authorization records out to a pool of workers that batch
// them and pipeline the batches into the backing store.
type Analytics struct {
	// store is the sink that receives pipelined record batches.
	store                      AnalyticsHandler
	// poolSize is the number of recordWorker goroutines started by Start.
	poolSize                   int
	// recordsChan carries records from RecordHit to the worker pool.
	recordsChan                chan *AnalyticsRecord
	// workerBufferSize is the per-worker batch size before a flush to the store.
	workerBufferSize           uint64
	// recordsBufferFlushInterval is the forced-flush timeout in milliseconds.
	recordsBufferFlushInterval uint64
	// shouldStop is read/written atomically; non-zero makes RecordHit drop records.
	shouldStop                 uint32
	// poolWg tracks the worker goroutines so a shutdown path can wait for them.
	poolWg                     sync.WaitGroup
}

// AnalyticsHandler abstracts the storage backend (e.g. Redis) used to
// persist analytics records.
type AnalyticsHandler interface {
	Connect() bool
	AppendToSetPipelined(string, [][]byte)
	GetAndDeleteSet(string) []interface{}
	SetExp(string, time.Duration) error // Set key expiration
	GetExp(string) (int64, error)       // Returns expiry of a key
}

// AnalyticsRecord encodes the details of an authorization request.
type AnalyticsRecord struct {
	// TimeStamp is the Unix time at which the request was authorized.
	TimeStamp int64  `json:"timestamp"`
	// Username identifies who issued the authorization request.
	Username  string `json:"username"`
}

// NewAnalytics creates (and installs as the package singleton) an Analytics
// instance backed by store.
//
// The values used here mirror the documented iam-authz-server defaults:
//
//	analytics:
//	  enable: true               # record authorization audit logs
//	  pool-size: 50              # number of workers, default 50
//	  records-buffer-size: 2000  # number of buffered records
//	  flush-interval: 200        # forced flush timeout in ms, 0 < v <= 1000
//	  enable-detailed-recording: true
//	  storage-expiration-time: 24h0m0s
func NewAnalytics(store AnalyticsHandler) *Analytics {
	const (
		poolSize            = 50   // worker goroutines draining recordsChan
		recordsBufferSize   = 2000 // capacity of the shared records channel
		flushIntervalMillis = 200  // forced flush timeout; must be in (0, 1000]
	)

	// Split the records buffer evenly across the workers: each worker
	// accumulates this many encoded records before one pipelined append.
	workerBufferSize := uint64(recordsBufferSize) / uint64(poolSize)

	analytics = &Analytics{
		store:                      store,
		poolSize:                   poolSize,
		recordsChan:                make(chan *AnalyticsRecord, recordsBufferSize),
		workerBufferSize:           workerBufferSize,
		recordsBufferFlushInterval: flushIntervalMillis,
	}

	return analytics
}

// Start connects the backing store and launches the worker pool, then
// returns. Workers run until recordsChan is closed; poolWg is kept so a
// shutdown path can wait for them to drain.
//
// Note: the previous implementation called r.poolWg.Wait() here, which
// blocked Start forever (nothing ever closes recordsChan in this file),
// deadlocking any caller that invoked Start synchronously.
func (r *Analytics) Start() {
	r.store.Connect()

	// Clear the stop flag so RecordHit accepts records again.
	atomic.SwapUint32(&r.shouldStop, 0)

	// Start the worker pool.
	for i := 0; i < r.poolSize; i++ {
		r.poolWg.Add(1)
		go r.recordWorker()
	}
}

// recordWorker drains recordsChan, msgpack-encodes each record into a local
// batch, and pipelines the batch to the store either when the batch reaches
// workerBufferSize or when flushInterval elapses without a flush — so records
// are never held in memory indefinitely. It exits (after a final flush) when
// recordsChan is closed.
func (r *Analytics) recordWorker() {
	defer r.poolWg.Done()

	// Buffer for one pipelined store command; cap at the per-worker batch
	// size to avoid slice re-allocations.
	recordsBuffer := make([][]byte, 0, r.workerBufferSize)

	flushInterval := time.Duration(r.recordsBufferFlushInterval) * time.Millisecond
	lastSentTS := time.Now()

	for {
		var readyToSend bool

		select {
		case record, ok := <-r.recordsChan:
			// Channel closed: flush whatever is left and exit the worker.
			if !ok {
				r.store.AppendToSetPipelined(analyticsKeyName, recordsBuffer)

				return
			}

			// Encode the new record and add it to the batch.
			if encoded, err := msgpack.Marshal(record); err != nil {
				fmt.Printf("Error encoding analytics data: %s", err.Error())
			} else {
				recordsBuffer = append(recordsBuffer, encoded)
			}

			// The batch is ready as soon as it reaches the per-worker size.
			readyToSend = uint64(len(recordsBuffer)) == r.workerBufferSize

		case <-time.After(flushInterval):
			// Nothing was received for a full interval; send whatever we
			// have rather than holding data in the buffer.
			readyToSend = true
		}

		// Send data to the store and reset the buffer, keeping its capacity.
		if len(recordsBuffer) > 0 && (readyToSend || time.Since(lastSentTS) >= flushInterval) {
			r.store.AppendToSetPipelined(analyticsKeyName, recordsBuffer)
			recordsBuffer = recordsBuffer[:0]
			lastSentTS = time.Now()
		}
	}
}

// RecordHit queues an AnalyticsRecord for asynchronous persistence by the
// worker pool. Once the stop flag is raised the record is silently dropped.
// It always returns nil.
func (r *Analytics) RecordHit(record *AnalyticsRecord) error {
	// When shutting down, refuse new records before touching the channel.
	stopped := atomic.LoadUint32(&r.shouldStop)
	if stopped > 0 {
		return nil
	}

	// Hand the record to the worker pool; all encoding and store I/O
	// happens on the worker side.
	r.recordsChan <- record

	return nil
}
