package data

import (
	kafka "github.com/Shopify/sarama"
	"github.com/go-kratos/kratos/v2/log"
	"github.com/gomodule/redigo/redis"
	"github.com/google/wire"
	"github.com/hashicorp/consul/api"
	"go_private_im/internal/logic/conf"
	"gorm.io/driver/mysql"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
	"gorm.io/gorm/schema"
	logs "log"
	"os"
	"time"
)

// ProviderSet is data providers.
var ProviderSet = wire.NewSet(NewData, NewLogicRepo)

// Data bundles the infrastructure clients shared by the logic layer:
// a Redis connection pool, a synchronous Kafka producer, a Consul
// client, and a GORM MySQL handle.
type Data struct {
	// TODO wrapped database client
	redis        *redis.Pool        // connection pool built by newRedis
	kafkaPub     kafka.SyncProducer // sync producer built by newKafkaPub
	log          *log.Helper        // structured logger helper
	consulClient *api.Client        // injected Consul client
	Db           *gorm.DB           // exported so repos can run queries directly
}

// NewData assembles a Data container from the service config, a Kratos
// logger, and an injected Consul client. It panics (via NewDatabase /
// newKafkaPub) if MySQL or Kafka is unreachable at startup.
//
// Note: the logger parameter is deliberately named l so it does not
// shadow the imported gorm.io/gorm/logger package.
func NewData(c *conf.Config, l log.Logger, client *api.Client) *Data {
	return &Data{
		redis:        newRedis(c.Redis),
		kafkaPub:     newKafkaPub(c.Kafka),
		log:          log.NewHelper(l),
		consulClient: client,
		Db:           NewDatabase(),
	}
}

// NewDatabase opens a GORM handle to the local MySQL instance and
// panics if the connection cannot be established.
//
// NOTE(review): the DSN is hard-coded; consider moving it into
// conf.Config next to the Redis/Kafka settings so environments other
// than local dev can be targeted.
func NewDatabase() *gorm.DB {
	dsn := "root:root@tcp(127.0.0.1:3306)/private_im?charset=utf8mb4&parseTime=True&loc=Local"

	// Log every statement at Info level and flag queries slower than 1s.
	gormLogger := logger.New(
		logs.New(os.Stdout, "\r\n", logs.LstdFlags), // io writer
		logger.Config{
			SlowThreshold: time.Second, // slow-SQL threshold
			LogLevel:      logger.Info,
			Colorful:      true, // colorized log output
		},
	)

	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
		Logger: gormLogger,
		NamingStrategy: schema.NamingStrategy{
			// Use singular table names: model `User` maps to table `user`.
			SingularTable: true,
		},
	})
	if err != nil {
		panic(err)
	}
	return db
}

// newRedis builds a redigo connection pool from the Redis section of
// the service config. Connections are dialed lazily on first use.
func newRedis(c *conf.Redis) *redis.Pool {
	dial := func() (redis.Conn, error) {
		conn, err := redis.Dial(c.Network, c.Addr,
			redis.DialConnectTimeout(c.DialTimeout),
			redis.DialReadTimeout(c.ReadTimeout),
			redis.DialWriteTimeout(c.WriteTimeout),
			redis.DialPassword(c.Auth),
		)
		if err != nil {
			return nil, err
		}
		return conn, nil
	}

	// NOTE(review): IdleTimeout wraps c.IdleTimeout in time.Duration
	// while the Dial* timeouts are passed through unchanged — confirm
	// the conf field types agree on units.
	return &redis.Pool{
		Dial:        dial,
		MaxIdle:     c.Idle,
		MaxActive:   c.Active,
		IdleTimeout: time.Duration(c.IdleTimeout),
	}
}

// newKafkaPub constructs a synchronous Sarama producer that waits for
// acks from all in-sync replicas and retries a failed send up to 10
// times. It panics if the brokers cannot be reached at startup.
func newKafkaPub(c *conf.Kafka) kafka.SyncProducer {
	cfg := kafka.NewConfig()
	cfg.Producer.Return.Successes = true        // required by SyncProducer
	cfg.Producer.RequiredAcks = kafka.WaitForAll // ack only after all ISRs have the message
	cfg.Producer.Retry.Max = 10
	//cfg.Producer.Partitioner = kafka.NewHashPartitioner // route a given key to a single partition

	pub, err := kafka.NewSyncProducer(c.Brokers, cfg)
	if err != nil {
		panic(err)
	}
	return pub
}

