package mq

import (
	"context"
	"encoding/json"
	"sync"
	"time"

	"github.com/imdario/mergo"
	"github.com/redis/go-redis/v9"
	"github.com/ssgreg/repeat"
	"go.uber.org/atomic"
)

// defaultProducerOptions provides fallback values that NewProducer merges
// into the caller's options (mergo fills only zero-valued fields).
var defaultProducerOptions = ProducerOptions{
	MaxLen:            10000,
	ShardsCount:       10,                     // number of shard streams
	PendingBufferSize: 5000,                   // capacity of the local pending-message channel
	PipeBufferSize:    100,                    // max pipelined entries per shard flush
	PipePeriod:        100 * time.Millisecond, // max wait before flushing a partial batch
}

// ProducerOptions configures a Producer. Zero-valued fields are filled
// from defaultProducerOptions by NewProducer.
type ProducerOptions struct {
	Topic             string        // base stream name; combined with a shard index by makeTopicName
	MaxLen            int64         // approximate max stream length enforced via XTRIM
	ShardsCount       int8          // number of shard streams written to in round-robin order
	PendingBufferSize int64         // capacity of the local buffered channel fed by Send
	PipeBufferSize    int64         // batch size: entries pipelined per XADD transaction
	PipePeriod        time.Duration // max time a partial batch waits before being flushed
	ErrorNotifier     Notifier      // receives AmiError/AmiState callbacks; may be nil
}

// Producer buffers JSON-encoded messages locally and flushes them in
// pipelined batches to sharded Redis streams.
type Producer struct {
	*client
	c        chan string  // pending messages; closed by Close to stop produce
	notifier Notifier     // optional error/state sink (nil disables reporting)
	opt      ProducerOptions
	wg       *sync.WaitGroup // tracks the produce loop and in-flight batch goroutines
	rate     atomic.Int32 // messages accepted in the current second (reset by showState)
	counter  atomic.Int32 // messages since the last XTRIM; triggers a trim past a threshold
}

// NewProducer merges opt with the package defaults, creates the underlying
// stream client, and starts the background flush loop plus a once-a-second
// state reporter. The returned Producer must be shut down with Close.
func NewProducer(ctx context.Context, opt ProducerOptions, rdsCli redis.UniversalClient) (*Producer, error) {
	// Fill any zero-valued option fields from defaultProducerOptions.
	if err := mergo.Merge(&opt, defaultProducerOptions); err != nil {
		return nil, err
	}

	cli, err := newClient(ctx, opt.Topic, "group", opt.ShardsCount, rdsCli)
	if err != nil {
		return nil, err
	}

	p := &Producer{
		client:   cli,
		c:        make(chan string, opt.PendingBufferSize),
		notifier: opt.ErrorNotifier,
		opt:      opt,
		wg:       new(sync.WaitGroup),
	}

	// One WaitGroup slot for the produce loop; Close waits on it.
	p.wg.Add(1)
	go p.produce(ctx)
	go p.showState()

	return p, nil
}

// showState reports the number of messages accepted during the last second
// to the notifier (when set) and resets the rate counter.
//
// NOTE(review): the ticker is never stopped and the loop has no exit
// condition, so this goroutine (started by NewProducer) keeps running even
// after Close — consider wiring in a stop signal or context.
func (p *Producer) showState() {
	for range time.NewTicker(time.Second).C {
		if p.notifier != nil {
			p.notifier.AmiState(&State{
				Rate:    p.rate.Load(),
				Latency: 0, // latency is not measured here
			})
		}
		p.rate.Store(0)
	}
}

// Close stops accepting new messages and blocks until the produce loop has
// flushed its buffer and all in-flight batch goroutines have finished
// (both are tracked by p.wg).
//
// NOTE(review): calling Close twice panics (double close of p.c), and
// calling Send concurrently with or after Close panics (send on a closed
// channel). The showState goroutine is not stopped here.
func (p *Producer) Close() {
	close(p.c)
	p.wg.Wait()
}

// Send serializes m to JSON and enqueues it for batched delivery. A nil
// message is ignored. On marshal failure the message is dropped and the
// error is reported to the notifier (when set). The call blocks while the
// local pending buffer is full.
func (p *Producer) Send(m *Package) {
	if m == nil {
		return
	}

	data, err := json.Marshal(m)
	if err == nil {
		p.c <- string(data)
		p.rate.Inc()
		return
	}

	if p.notifier != nil {
		p.notifier.AmiError(err)
	}
}

// produce is the background flush loop. It accumulates messages from p.c
// into a batch and flushes the batch to the current shard when either the
// batch is full or PipePeriod has elapsed with nothing left pending, then
// advances to the next shard round-robin. When Close closes p.c, the
// remaining buffer is flushed and the loop exits.
func (p *Producer) produce(ctx context.Context) {
	defer p.wg.Done()

	shard := 0
	buf := make([]string, 0, p.opt.PipeBufferSize)
	started := time.Now()

	tick := time.NewTicker(p.opt.PipePeriod)
	defer tick.Stop() // fix: ticker was never stopped, holding its resources past shutdown

	for {
		var doStop bool

		select {
		case m, more := <-p.c:
			if !more {
				doStop = true
			} else {
				buf = append(buf, m)
			}
		case <-tick.C:
			// Wake up periodically so a partial batch is flushed even
			// when no new messages arrive.
		}

		if doStop {
			// Channel closed by Close: flush whatever is buffered and exit.
			p.sendWithLock(ctx, shard, buf)
			return
		}

		// Flush when the batch is full, or when a full period has elapsed
		// and no more messages are immediately pending.
		doSend := len(buf) >= int(p.opt.PipeBufferSize) ||
			(time.Since(started) >= p.opt.PipePeriod && len(p.c) == 0)
		if !doSend {
			continue
		}

		p.sendWithLock(ctx, shard, buf)

		// sendWithLock snapshots buf synchronously, so reusing the
		// backing array here is safe.
		buf = buf[:0]
		started = time.Now()

		// Round-robin across shard streams.
		if shard++; shard >= int(p.opt.ShardsCount) {
			shard = 0
		}
	}
}

// sendWithLock snapshots buf into XADD commands for the given shard and
// dispatches them asynchronously. The WaitGroup is incremented before the
// goroutine starts so Close can wait for in-flight batches.
func (p *Producer) sendWithLock(ctx context.Context, shard int, buf []string) {
	length := len(buf)
	if length == 0 {
		return
	}

	p.counter.Add(int32(length))
	args := make([]redis.XAddArgs, length)
	topic := makeTopicName(p.opt.Topic, shard)

	// Snapshot buf now: the caller reuses its backing array after we return.
	for i, m := range buf {
		args[i] = redis.XAddArgs{
			ID:     "*",
			Stream: topic,
			Values: map[string]interface{}{dataField: m, verFiled: v1},
		}
	}

	p.wg.Add(1)

	go func() {
		defer p.wg.Done()

		p.send(ctx, args)

		// After roughly every 100 messages, trim the shard stream back to
		// MaxLen. Load+Store is racy between concurrent batches; the worst
		// case is an extra trim, which is harmless since XTRIM is idempotent.
		if p.counter.Load() > 100 {
			p.counter.Store(0)
			// fix: the trim error was silently dropped; surface it.
			if err := p.rds.XTrimMaxLen(ctx, topic, p.opt.MaxLen).Err(); err != nil && p.notifier != nil {
				p.notifier.AmiError(err)
			}
		}
	}()
}

// send writes all XADD commands in a single transactional pipeline,
// retrying transient failures with full-jitter backoff (500ms base) until
// the pipeline succeeds or ctx is canceled. Errors are reported via the
// notifier when one is set.
func (p *Producer) send(ctx context.Context, args []redis.XAddArgs) {
	err := repeat.Repeat(
		repeat.Fn(func() error {
			// fix: stop retrying once the context is canceled instead of
			// backing off forever against a dead connection.
			if err := ctx.Err(); err != nil {
				return repeat.HintStop(err)
			}

			pipe := p.rds.TxPipeline()

			// fix: index into args rather than taking the address of the
			// range variable — avoids copying each XAddArgs per iteration
			// and passing aliases of a single loop variable.
			for i := range args {
				pipe.XAdd(ctx, &args[i])
			}

			if _, err := pipe.Exec(ctx); err != nil {
				if p.notifier != nil {
					p.notifier.AmiError(err)
				}

				return repeat.HintTemporary(err)
			}

			return nil
		}),
		repeat.StopOnSuccess(),
		repeat.WithDelay(repeat.FullJitterBackoff(500*time.Millisecond).Set()),
	)

	if err != nil && p.notifier != nil {
		p.notifier.AmiError(err)
	}
}
