package worker

import (
	"context"
	"crontab/common"
	"encoding/json"
	"github.com/Shopify/sarama"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"math/rand"
	"strconv"
	"strings"
	"time"
)

// LogPersist batches job execution logs into MongoDB, with a Kafka-based
// compensation path used when the in-memory queue overflows.
type LogPersist struct {
	client         *mongo.Client
	logCollect     *mongo.Collection     // target collection for job logs
	logChan        chan *common.JobLog   // incoming logs waiting to be batched
	autoCommitChan chan *common.LogBatch // carries timed-out batches that must be flushed
	failTime       map[string]int        // per-job count of enqueue failures; NOTE(review): accessed without a lock — confirm AppendLog has a single caller goroutine
	kafkaBroker    *common.KafkaBackOff  // Kafka connection settings copied from config
	kafkaClient    interface{}           // holds a sarama.SyncProducer or sarama.AsyncProducer depending on KafkaAcks
}

var (
	// G_LogPersist is the package-wide log persistence singleton,
	// created by InitMongoDB.
	G_LogPersist *LogPersist
)

// syncProduer builds a synchronous Kafka producer (acks = all, random
// partitioner) and stores it in the receiver's kafkaClient field.
// Selected when KafkaAcks is configured as -1.
func (logPersist *LogPersist) syncProduer() (err error) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	// A sync producer requires Return.Successes, otherwise SendMessage
	// never completes.
	config.Producer.Return.Successes = true
	config.Producer.Timeout = time.Duration(G_Config.KafkaTimeout) * time.Millisecond
	// Assign through the receiver instead of the global G_LogPersist so
	// the method works on whichever instance it is invoked on.
	logPersist.kafkaClient, err = sarama.NewSyncProducer(strings.Split(G_Config.KafkaBrokers, ","), config)
	return
}
// asyncProduer builds an asynchronous Kafka producer and stores it in the
// receiver's kafkaClient field. Selected when KafkaAcks is not -1; the
// caller is responsible for draining its Successes/Errors channels.
func (logPersist *LogPersist) asyncProduer() (err error) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Timeout = time.Duration(G_Config.KafkaTimeout) * time.Millisecond
	// Assign through the receiver instead of the global G_LogPersist so
	// the method works on whichever instance it is invoked on.
	logPersist.kafkaClient, err = sarama.NewAsyncProducer(strings.Split(G_Config.KafkaBrokers, ","), config)
	return
}
// initKafkaProducer copies the Kafka settings from G_Config into the
// singleton's broker descriptor and creates the producer: KafkaAcks == -1
// selects a synchronous producer, anything else an asynchronous one whose
// Successes/Errors channels are drained by a background goroutine.
func initKafkaProducer() (err error) {
	broker := G_LogPersist.kafkaBroker
	broker.KafkaAcks = 0
	broker.KafkaTimeout = G_Config.KafkaTimeout
	broker.KafkaTopics = G_Config.KafkaTopics
	broker.KafkaBrokers = G_Config.KafkaBrokers
	broker.KafkaPartitioner = G_Config.KafkaPartitioner
	if G_Config.KafkaAcks == -1 {
		broker.KafkaAcks = -1
		// acks = -1 means the synchronous producer.
		return G_LogPersist.syncProduer()
	}
	if err = G_LogPersist.asyncProduer(); err != nil {
		return
	}
	// An async producer blocks once its Successes/Errors channels fill up,
	// so both must be drained for the producer's lifetime.
	asyncProduce := G_LogPersist.kafkaClient.(sarama.AsyncProducer)
	go func(p sarama.AsyncProducer) {
		for {
			select {
			case <-p.Errors():
				// Delivery errors are deliberately dropped, matching the
				// original behavior. NOTE(review): consider surfacing them.
			case <-p.Successes():
			}
		}
	}(asyncProduce)
	return
}
// InitMongoDB connects to MongoDB using the configured URL and timeout,
// builds the global G_LogPersist singleton, wires up the Kafka fallback
// producer, and starts the background batch-saver goroutine.
func InitMongoDB() (err error) {
	var client *mongo.Client
	if client, err = mongo.NewClient(options.Client().ApplyURI(G_Config.DbUrl)); err != nil {
		return
	}
	// Keep the cancel func and release the timeout's resources on return;
	// the original discarded it, which `go vet` flags as a context leak.
	ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(G_Config.DbConnectTimeout)*time.Millisecond)
	defer cancel()
	if err = client.Connect(ctx); err != nil {
		return
	}
	G_LogPersist = &LogPersist{
		client:         client,
		logCollect:     client.Database(G_Config.DbName).Collection(G_Config.TableName),
		logChan:        make(chan *common.JobLog, G_Config.QueueSize),
		autoCommitChan: make(chan *common.LogBatch, 1000),
		failTime:       make(map[string]int),
		kafkaBroker:    &common.KafkaBackOff{},
	}
	// The Kafka compensation path in AppendLog depends on a working
	// producer, so a Kafka failure fails the whole initialization.
	if err = initKafkaProducer(); err != nil {
		return
	}
	go G_LogPersist.LogSave()
	return
}

//批量插入数据
func (logPersist *LogPersist) LogBatchSave(logBatch *common.LogBatch) {
	_, _ = logPersist.logCollect.InsertMany(context.TODO(), logBatch.LogItems)
}

// LogSave is the background batching loop: it accumulates incoming logs
// into a batch and flushes it either when it reaches LogBatchSize or when
// LogCommitDelayTime elapses (via an AfterFunc timer that pushes the batch
// onto autoCommitChan). Runs forever; started as a goroutine by InitMongoDB.
func (logPersist *LogPersist) LogSave() {
	var (
		log      *common.JobLog
		logBatch *common.LogBatch // the batch currently being filled
		// timer that auto-commits the current batch after the delay
		commitTimer *time.Timer
		timeoubatch *common.LogBatch
	)
	for {
		select {
		case log = <-logPersist.logChan:

			// Batch writes to MongoDB instead of inserting one by one.
			if logBatch == nil {
				logBatch = &common.LogBatch{}
				// The closure captures THIS batch by value, so a timer that
				// fires late cannot touch a newer batch.
				commitTimer = time.AfterFunc(time.Duration(G_Config.LogCommitDelayTime)*time.Millisecond, func(batch *common.LogBatch) func() {
					return func() {
						logPersist.autoCommitChan <- batch
					}
				}(logBatch))
			}
			logBatch.LogItems = append(logBatch.LogItems, log)
			if len(logBatch.LogItems) >= G_Config.LogBatchSize {
				logPersist.LogBatchSave(logBatch)
				logBatch = nil
				// Batch flushed early — cancel the pending auto-commit.
				commitTimer.Stop()
			}
		case timeoubatch = <-logPersist.autoCommitChan: // a batch whose timer fired
			// Only flush if this is still the active batch; a stale timer
			// for an already-saved batch must not cause a double insert.
			if timeoubatch != logBatch {
				continue
			}
			logPersist.LogBatchSave(timeoubatch)
			logBatch = nil
		}
	}
}

// AppendLog enqueues a job log for persistence without blocking. When the
// queue is full the failure is counted per job name; past the retry
// threshold the enqueue is retried, below it the log is shipped to Kafka
// as a compensation record (queue saturation is a candidate metric).
func (logPersist *LogPersist) AppendLog(jobLog *common.JobLog) {
	for {
		select {
		case logPersist.logChan <- jobLog:
			return
		default:
			// Queue full: take the compensation path.
			if G_Config.LogRetryTime == 0 {
				G_Config.LogRetryTime = 1
			}
			// NOTE(review): failTime is read/written without a lock —
			// confirm AppendLog is only called from one goroutine.
			logPersist.failTime[jobLog.JobName]++
			if logPersist.failTime[jobLog.JobName] > G_Config.LogRetryTime && G_Config.LogRetryTime > 1 {
				// Retry the enqueue. The original recursed into AppendLog
				// here, which grows the stack without bound while the
				// queue stays full; this loop is behavior-equivalent.
				continue
			}
			if G_Config.LogRetryTime > 1 {
				jobMarshal, _ := json.Marshal(jobLog)
				// Random prefix keeps Kafka records distinct; rand.Intn on
				// the shared source avoids allocating a fresh generator per
				// call as the original did, with the same distribution.
				v := strconv.Itoa(rand.Intn(10000)) + string(jobMarshal)
				msg := &sarama.ProducerMessage{
					Topic: logPersist.kafkaBroker.KafkaTopics,
					Value: sarama.ByteEncoder(v),
				}
				if logPersist.kafkaBroker.KafkaAcks == -1 {
					// Synchronous producer; send errors are swallowed,
					// matching the original best-effort behavior.
					syncProduce := logPersist.kafkaClient.(sarama.SyncProducer)
					if _, _, err := syncProduce.SendMessage(msg); err != nil {
						return
					}
				} else {
					asyncProduce := logPersist.kafkaClient.(sarama.AsyncProducer)
					asyncProduce.Input() <- msg
				}
			}
			return
		}
	}
}
