package dao

import (
	"context"
	"fmt"
	"github.com/Shopify/sarama"
	"github.com/go-redis/redis/v8"
	"github.com/olivere/elastic/v7"
	"kee/config"
	"log"
	"time"
)

// dao is the package-level singleton; it is replaced by NewDao at startup
// and starts as an empty (unconnected) Dao until then.
var dao = new(Dao)

// Dao bundles every backing-store client the service depends on:
// Redis, a Kafka sync producer, Elasticsearch, and a Kafka consumer group.
type Dao struct {
	RedisClient         *redis.Client             // Redis connection pool
	ProducerServerKafka *SyncProducerKafka        // Kafka synchronous producer wrapper
	ESClient            *elastic.Client           // Elasticsearch v7 client
	ConsumerGroupClient *ConsumerGroupClientKafka // Kafka consumer-group wrapper
}

// SyncProducerKafka wraps a sarama.SyncProducer so methods can be attached
// to it elsewhere in the package.
type SyncProducerKafka struct {
	SyncProducerKafka sarama.SyncProducer
}

// ConsumerGroupClientKafka wraps a sarama.ConsumerGroup so methods can be
// attached to it elsewhere in the package.
type ConsumerGroupClientKafka struct {
	ConsumerGroupClient sarama.ConsumerGroup
}

// NewDao initializes every backing client (Kafka sync producer, Kafka
// consumer group, Elasticsearch, Redis) from cfg, stores the result in the
// package-level dao variable, and returns it. Any initialization failure
// panics (or log.Panicf for an unknown assignor), so this is meant to run
// exactly once at process startup.
func NewDao(ctx context.Context, cfg *config.Config) *Dao {

	// Kafka producer: report successful deliveries back on the Successes
	// channel and spread messages across partitions randomly.
	producerConfig := sarama.NewConfig()
	producerConfig.Producer.Return.Successes = true
	producerConfig.Producer.Partitioner = sarama.NewRandomPartitioner
	client, err := sarama.NewClient(cfg.KafkaClientConfig.Brokers, producerConfig)
	if err != nil {
		panic(fmt.Errorf("sarama.NewClient|brokers=%v: %w", cfg.KafkaClientConfig.Brokers, err))
	}

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		panic(fmt.Errorf("sarama.NewSyncProducerFromClient: %w", err))
	}

	// Kafka consumer group: partition-assignment strategy comes from config.
	consumerConfig := sarama.NewConfig()
	switch cfg.KafkaClientConfig.Assignor {
	case "sticky":
		consumerConfig.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
	case "roundrobin":
		consumerConfig.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	case "range":
		consumerConfig.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
	default:
		log.Panicf("Unrecognized consumer group partition assignor: %s", cfg.KafkaClientConfig.Assignor)
	}

	if cfg.KafkaClientConfig.Oldest {
		// Start from the oldest available offset when no committed offset exists.
		consumerConfig.Consumer.Offsets.Initial = sarama.OffsetOldest
	}

	consumerGroup, err := sarama.NewConsumerGroup(cfg.KafkaClientConfig.Brokers, cfg.KafkaClientConfig.Group, consumerConfig)
	if err != nil {
		panic(fmt.Errorf("sarama.NewConsumerGroup: %w", err))
	}

	// Elasticsearch. Sniffing is disabled because the server runs in Docker /
	// on another LAN host, where sniffed node addresses would be unreachable;
	// a locally hosted ES instance would not need SetSniff(false).
	esClient, err := elastic.NewClient(elastic.SetURL(cfg.ESConfig.EsUrl), elastic.SetSniff(false))
	if err != nil {
		panic(fmt.Errorf("elastic.NewClient: %w", err))
	}

	// Redis; the configured timeouts are expressed in milliseconds.
	rdb := redis.NewClient(&redis.Options{
		Addr:         cfg.RedisConfig.Addr,
		Password:     cfg.RedisConfig.Password, // empty string means no password
		DB:           cfg.RedisConfig.DB,       // 0 selects the default DB
		DialTimeout:  time.Duration(cfg.RedisConfig.DialTimeout) * time.Millisecond,
		ReadTimeout:  time.Duration(cfg.RedisConfig.ReadTimeout) * time.Millisecond,
		WriteTimeout: time.Duration(cfg.RedisConfig.WriteTimeout) * time.Millisecond,
		PoolSize:     cfg.RedisConfig.PoolSize,
	})

	// Fail fast if Redis is unreachable.
	if _, err = rdb.Ping(ctx).Result(); err != nil {
		panic(fmt.Errorf("rdb.Ping: %w", err))
	}

	dao = &Dao{
		RedisClient:         rdb,
		ProducerServerKafka: &SyncProducerKafka{SyncProducerKafka: producer},
		ESClient:            esClient,
		ConsumerGroupClient: &ConsumerGroupClientKafka{ConsumerGroupClient: consumerGroup},
	}

	return dao
}
