package data

import (
	"context"
	managev1 "gitee.com/go-vulcanus/vulcanus/api/devmanage/v1"
	storagev1 "gitee.com/go-vulcanus/vulcanus/api/storage/v1"
	"gitee.com/go-vulcanus/vulcanus/conf"
	"github.com/Shopify/sarama"
	"github.com/go-kratos/kratos/contrib/registry/nacos/v2"
	"github.com/go-kratos/kratos/v2/log"
	"github.com/go-kratos/kratos/v2/registry"
	"github.com/go-kratos/kratos/v2/transport/http"
	"github.com/go-redis/redis/extra/redisotel"
	"github.com/go-redis/redis/v8"
	"github.com/google/wire"
	"github.com/nacos-group/nacos-sdk-go/clients"
	"github.com/nacos-group/nacos-sdk-go/common/constant"
	"github.com/nacos-group/nacos-sdk-go/vo"
	"time"
)

// DataProviderSet is the wire provider set that exposes every constructor in
// the data layer for dependency injection.
var DataProviderSet = wire.NewSet(NewData, NewRegistrar, NewDiscovery, NewKafka, NewDevManageClient, NewStorageClient)

// Data bundles the external resources the service layer depends on.
type Data struct {
	Rdb *redis.Client                // Redis connection (tracing hook installed in NewData)
	Kp  *Kafka                       // async Kafka producer wrapper
	M1  managev1.DevManageHTTPClient // HTTP client for the device-manage service
	S1  storagev1.StorageHTTPClient  // HTTP client for the storage service
}

// NewData connects to Redis (with an OpenTelemetry tracing hook) and bundles
// the injected Kafka producer and downstream HTTP clients into a Data
// aggregate. It returns the aggregate together with a cleanup function that
// closes the Redis connection and the Kafka producer; close errors are logged
// rather than propagated since cleanup runs at shutdown.
//
// Note: the config parameter is named c (not conf) to avoid shadowing the
// imported conf package inside the body.
func NewData(c *conf.Data, k *Kafka, m1 managev1.DevManageHTTPClient, s1 storagev1.StorageHTTPClient) (*Data, func(), error) {
	rdb := redis.NewClient(&redis.Options{
		Addr:         c.Redis.Addr,
		Password:     c.Redis.Password,
		DB:           int(c.Redis.Db),
		DialTimeout:  c.Redis.DialTimeout.AsDuration(),
		WriteTimeout: c.Redis.WriteTimeout.AsDuration(),
		ReadTimeout:  c.Redis.ReadTimeout.AsDuration(),
	})
	rdb.AddHook(redisotel.TracingHook{})

	d := &Data{
		Rdb: rdb,
		Kp:  k,
		M1:  m1,
		S1:  s1,
	}

	cleanup := func() {
		if err := d.Rdb.Close(); err != nil {
			log.Error(err)
		}
		if err := d.Kp.producer.Close(); err != nil {
			log.Error(err)
		}
	}
	return d, cleanup, nil
}

// NewRegistrar builds a Nacos-backed service registrar from the registry
// configuration. It panics if the Nacos naming client cannot be created,
// which aborts application startup.
//
// The parameter is named rc (not conf) so it does not shadow the imported
// conf package.
func NewRegistrar(rc *conf.Registry) registry.Registrar {
	sc := []constant.ServerConfig{
		*constant.NewServerConfig(rc.Nacos.Ip, rc.Nacos.Port),
	}

	cc := &constant.ClientConfig{
		NamespaceId:         rc.Nacos.NamespaceId,
		TimeoutMs:           rc.Nacos.Timeout,
		NotLoadCacheAtStart: true, // always fetch fresh instances on startup
		CacheDir:            "/tmp/nacos/cache",
		LogDir:              "/tmp/nacos/log",
	}

	client, err := clients.NewNamingClient(
		vo.NacosClientParam{
			ClientConfig:  cc,
			ServerConfigs: sc,
		},
	)
	if err != nil {
		panic(err)
	}

	return nacos.New(client, nacos.WithDefaultKind("http"), nacos.WithPrefix(""))
}

// NewDiscovery builds a Nacos-backed service discovery from the registry
// configuration, mirroring NewRegistrar. It panics if the Nacos naming
// client cannot be created, which aborts application startup.
//
// The parameter is named rc (not conf) so it does not shadow the imported
// conf package.
func NewDiscovery(rc *conf.Registry) registry.Discovery {
	sc := []constant.ServerConfig{
		*constant.NewServerConfig(rc.Nacos.Ip, rc.Nacos.Port),
	}

	cc := &constant.ClientConfig{
		NamespaceId:         rc.Nacos.NamespaceId,
		TimeoutMs:           rc.Nacos.Timeout,
		NotLoadCacheAtStart: true, // always fetch fresh instances on startup
		CacheDir:            "/tmp/nacos/cache",
		LogDir:              "/tmp/nacos/log",
	}

	client, err := clients.NewNamingClient(
		vo.NacosClientParam{
			ClientConfig:  cc,
			ServerConfigs: sc,
		},
	)
	if err != nil {
		panic(err)
	}

	return nacos.New(client, nacos.WithDefaultKind("http"), nacos.WithPrefix(""))
}

// NewDevManageClient constructs an HTTP client for the device-manage service,
// resolving its address through service discovery. Panics on construction
// failure, aborting startup.
func NewDevManageClient(r registry.Discovery) managev1.DevManageHTTPClient {
	opts := []http.ClientOption{
		http.WithEndpoint("discovery:///iot-manage-server"),
		http.WithDiscovery(r),
	}
	conn, err := http.NewClient(context.Background(), opts...)
	if err != nil {
		panic(err)
	}
	return managev1.NewDevManageHTTPClient(conn)
}

// NewStorageClient constructs an HTTP client for the storage service,
// resolving its address through service discovery. Panics on construction
// failure, aborting startup.
func NewStorageClient(r registry.Discovery) storagev1.StorageHTTPClient {
	opts := []http.ClientOption{
		http.WithEndpoint("discovery:///iot-storage-server"),
		http.WithDiscovery(r),
	}
	conn, err := http.NewClient(context.Background(), opts...)
	if err != nil {
		panic(err)
	}
	return storagev1.NewStorageHTTPClient(conn)
}

// Kafka wraps an async sarama producer and exposes topic-specific send
// helpers for the IoT data pipeline.
type Kafka struct {
	producer sarama.AsyncProducer // drained by the goroutine started in newKafkaProducer
}

// NewKafka creates the Kafka producer wrapper. confApp.Device.TraceMsg
// toggles per-message success logging in the producer's drain goroutine.
//
// The data-config parameter is named c (not conf) to avoid shadowing the
// imported conf package.
func NewKafka(c *conf.Data, confApp *conf.App) *Kafka {
	return &Kafka{producer: newKafkaProducer(c, confApp.Device.TraceMsg)}
}

// newKafkaProducer builds a sarama async producer from the Kafka config and
// starts a goroutine that drains the Successes and Errors channels.
//
// Draining is mandatory: with Return.Successes/Return.Errors enabled the
// producer blocks once those channels fill up. Unlike a plain select loop,
// the drain goroutine here uses comma-ok receives and nils out each channel
// once it is closed, so it terminates cleanly after the producer is closed
// (the original looped forever on nil receives from closed channels,
// busy-spinning a CPU core and leaking the goroutine).
//
// Panics if the producer cannot be created, aborting startup.
func newKafkaProducer(c *conf.Data, traceMsg bool) sarama.AsyncProducer {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_1_0
	cfg.Producer.RequiredAcks = sarama.WaitForLocal      // ack once the leader has the message
	cfg.Producer.Partitioner = sarama.NewHashPartitioner // stable partition per message key
	cfg.Producer.Return.Successes = true
	cfg.Producer.Return.Errors = true
	cfg.Producer.Retry.Max = 3 // retry up to 3 times
	cfg.Producer.Retry.Backoff = 100 * time.Millisecond

	producer, err := sarama.NewAsyncProducer(c.Kafka.Addrs, cfg)
	if err != nil {
		panic(err)
	}

	// Drain results so the producer's internal channels never block.
	go func(p sarama.AsyncProducer) {
		errCh := p.Errors()
		okCh := p.Successes()
		for errCh != nil || okCh != nil {
			select {
			case err, ok := <-errCh:
				if !ok {
					errCh = nil // closed: stop selecting on it
					continue
				}
				log.Errorf("kafka send error: [%s]", err.Error())
			case s, ok := <-okCh:
				if !ok {
					okCh = nil // closed: stop selecting on it
					continue
				}
				if traceMsg {
					log.Infof("kafka send success topic: [%s] message: [%s]", s.Topic, s.Value)
				}
			}
		}
	}(producer)

	return producer
}

// SendMetadata publishes a message to the metadata topic.
func (k *Kafka) SendMetadata(message string) {
	const topic = "topic-iot-data-meta"
	k.Send(topic, message)
}

// SendDeviceData publishes a message to the device-data topic.
func (k *Kafka) SendDeviceData(message string) {
	const topic = "topic-iot-data-device"
	k.Send(topic, message)
}

// SendStateData publishes a message to the device-state topic.
func (k *Kafka) SendStateData(message string) {
	const topic = "topic-iot-state-device"
	k.Send(topic, message)
}

// SendDeviceUpgrade publishes a message to the device-upgrade topic.
func (k *Kafka) SendDeviceUpgrade(message string) {
	const topic = "topic-iot-upgrade-device"
	k.Send(topic, message)
}

// SendDeviceSetting publishes a message to the device-setting (config push) topic.
func (k *Kafka) SendDeviceSetting(message string) {
	const topic = "topic-iot-setting-device"
	k.Send(topic, message)
}

// SendControllerState publishes a message to the controller-state topic.
func (k *Kafka) SendControllerState(message string) {
	const topic = "topic-iot-controller-state"
	k.Send(topic, message)
}

// SendDeviceInfo publishes a message to the device-info (online status) topic.
func (k *Kafka) SendDeviceInfo(message string) {
	const topic = "topic-iot-info-device"
	k.Send(topic, message)
}

// SendPictureData publishes picture data to its dedicated topic.
func (k *Kafka) SendPictureData(message string) {
	const topic = "topic-iot-data-picture"
	k.Send(topic, message)
}

// Send enqueues message on the async producer's input channel for the given
// topic. The message is sent with no key, so partitioning falls back to the
// configured partitioner's keyless behavior. May block if the producer's
// internal buffers are full.
func (k *Kafka) Send(topic string, message string) {
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder(message),
	}
	k.producer.Input() <- msg
}
