// Tencent is pleased to support the open source community by making
// 蓝鲸智云 - 监控平台/日志平台 (BlueKing - Monitor/Log) available.
// Copyright (C) 2017-2022 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at http://opensource.org/licenses/MIT
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//

package queue

import (
	"context"
	"sync"
	"time"

	"github.com/TencentBlueKing/bkmonitor-kits/logger"
	"github.com/elastic/beats/libbeat/common"

	"github.com/TencentBlueKing/bk-collector/define"
)

// mapStrShard accumulates common.MapStr payloads per data id and, once a
// batch reaches batchSize, combines them into a single MapStr via gather.
// All access to shard goes through mut, so it is safe for concurrent use.
type mapStrShard struct {
	mut       sync.Mutex // guards shard
	batchSize int
	shard     map[int32][]common.MapStr // pending payloads keyed by data id
	gather    func(int32, ...common.MapStr) common.MapStr
}

// newMapStrShard builds an empty shard that flushes a data id's pending
// payloads once size of them accumulate, combining them with gather.
func newMapStrShard(size int, gather func(int32, ...common.MapStr) common.MapStr) *mapStrShard {
	s := &mapStrShard{
		batchSize: size,
		gather:    gather,
	}
	s.shard = make(map[int32][]common.MapStr)
	return s
}

// Put appends data to the pending batch for dataId. When that batch reaches
// batchSize it is gathered into a single MapStr which is returned, and the
// pending batch is reset; otherwise Put returns nil.
func (s *mapStrShard) Put(dataId int32, data common.MapStr) common.MapStr {
	s.mut.Lock()
	defer s.mut.Unlock()

	// Single map lookup instead of re-indexing s.shard[dataId] four times.
	items := append(s.shard[dataId], data)
	if len(items) >= s.batchSize {
		r := s.gather(dataId, items...)
		// Reset length but keep capacity so the next batch reuses the array.
		// NOTE(review): this assumes gather copies whatever it retains — the
		// backing array is overwritten by later appends; confirm against the
		// New*MapStr gather implementations.
		s.shard[dataId] = items[:0]
		return r
	}
	s.shard[dataId] = items
	return nil
}

// All gathers every non-empty pending batch into one MapStr per data id,
// appends the results to dest, resets each batch, and returns the extended
// slice (append-style API so callers can reuse a buffer).
func (s *mapStrShard) All(dest []common.MapStr) []common.MapStr {
	s.mut.Lock()
	defer s.mut.Unlock()

	for k, v := range s.shard {
		if len(v) == 0 {
			continue
		}
		// v already holds s.shard[k]; no need to re-index the map.
		dest = append(dest, s.gather(k, v...))
		s.shard[k] = v[:0] // keep capacity for the next interval
	}
	return dest
}

// batchQueue buffers events into per-record-type shards and exposes gathered
// batches through items. A batch is emitted either when a shard reaches size
// (on Put) or when the periodic flush loop fires (every interval).
type batchQueue struct {
	ctx    context.Context    // lifetime of the flush loop
	cancel context.CancelFunc // triggered by Close
	wg     sync.WaitGroup     // lets Close wait for loop to exit

	size         int                // per-data-id batch threshold
	interval     time.Duration      // periodic flush interval; <=0 disables the loop
	items        chan common.MapStr // gathered batches, consumed via Pop
	tracesShard  *mapStrShard
	metricsShard *mapStrShard
	logsShard    *mapStrShard
}

// NewBatchQueue creates a queue that batches events per data id up to size
// items, flushing partially-filled batches every interval. The returned
// queue owns a background flush goroutine; stop it with Close.
func NewBatchQueue(size int, interval time.Duration) Queue {
	ctx, cancel := context.WithCancel(context.Background())
	q := &batchQueue{
		ctx:          ctx,
		cancel:       cancel,
		size:         size,
		interval:     interval,
		items:        make(chan common.MapStr, define.CoreNums()),
		tracesShard:  newMapStrShard(size, NewTracesMapStr),
		metricsShard: newMapStrShard(size, NewMetricsMapStr),
		logsShard:    newMapStrShard(size, NewLogsMapStr),
	}
	// Register the goroutine with the WaitGroup BEFORE spawning it: an Add
	// performed inside the goroutine races with Close's Wait when Close is
	// called immediately after construction (Wait could return first).
	q.wg.Add(1)
	go func() {
		defer q.wg.Done()
		q.loop()
	}()
	return q
}

// loop runs in its own goroutine: every interval it drains all three shards
// and pushes each gathered batch onto q.items, until the context is
// cancelled. A non-positive interval disables timed flushing entirely
// (time.NewTicker would panic on it), so only size-triggered batches flow.
func (q *batchQueue) loop() {
	// NOTE(review): Add here runs after the goroutine is scheduled and can
	// race with Close's Wait; registering in the constructor before `go`
	// would be safer.
	q.wg.Add(1)
	defer q.wg.Done()

	if q.interval <= 0 {
		return
	}
	ticker := time.NewTicker(q.interval)
	defer ticker.Stop()

	var dest []common.MapStr // reused across ticks to avoid reallocating
	for {
		select {
		case <-q.ctx.Done():
			return

		case <-ticker.C:
			dest = q.metricsShard.All(dest)
			dest = q.tracesShard.All(dest)
			dest = q.logsShard.All(dest)
			if len(dest) == 0 {
				continue
			}

			for _, m := range dest {
				// Respect cancellation while sending: a blind send would
				// block forever once the consumer is gone, hanging Close.
				select {
				case q.items <- m:
				case <-q.ctx.Done():
					return
				}
			}
			logger.Infof("batch queue tick msg count: %d", len(dest))
			dest = dest[:0]
		}
	}
}

// Close cancels the queue's context and blocks until the flush loop has
// exited. It does not close the items channel, so Pop receivers simply stop
// getting new batches.
func (q *batchQueue) Close() {
	q.cancel()
	q.wg.Wait()
}

// Put routes the event to the shard matching its record type. When the
// shard's pending batch for the event's data id fills up, the gathered
// batch is pushed onto the items channel. Unknown record types are dropped.
func (q *batchQueue) Put(event define.Event) {
	var shard *mapStrShard
	switch event.RecordType() {
	case define.RecordTraces:
		shard = q.tracesShard
	case define.RecordMetrics:
		shard = q.metricsShard
	case define.RecordLogs:
		shard = q.logsShard
	default:
		return
	}

	if batch := shard.Put(event.DataId(), event.Data()); batch != nil {
		q.items <- batch
	}
}

// Pop returns the receive-only channel from which gathered batches are
// consumed.
func (q *batchQueue) Pop() <-chan common.MapStr {
	return q.items
}
