package service

import (
	"context"
	"fmt"
	cluster "github.com/bsm/sarama-cluster"
	consul "github.com/go-kratos/consul/registry"
	"github.com/go-kratos/kratos/v2/log"
	"github.com/go-kratos/kratos/v2/registry"
	"github.com/golang/protobuf/proto"
	"github.com/hashicorp/consul/api"
	pb "go_private_im/api/logic"
	"go_private_im/internal/job/conf"
	"net/url"
	"strings"
	"sync"
)

// JobService consumes push messages from Kafka and dispatches them to
// comet servers discovered through Consul.
type JobService struct {
	c            *conf.Config      // job configuration (Kafka, Consul, Comet sections)
	consumer     *cluster.Consumer // Kafka group consumer created by newKafkaSub
	cometServers map[string]*Comet // comet connections keyed by host; rebuilt by newAddress

	rooms      map[string]*Room // room aggregators; guarded by roomsMutex
	roomsMutex sync.RWMutex     // protects rooms
	log *log.Helper // structured logger
}

// NewJobService builds a JobService wired to the configured Kafka
// consumer and starts watching comet server registrations in Consul.
// It panics (via newKafkaSub) if the Kafka consumer cannot be created.
func NewJobService(config *conf.Config, logger log.Logger) *JobService {
	j := &JobService{
		c:        config,
		consumer: newKafkaSub(config.Kafka),
		log:      log.NewHelper(logger),
		rooms:    make(map[string]*Room),
	}
	// Begin tracking comet instances; the watch runs in the background.
	j.watchComet(j.c.Consul)
	return j
}
// newKafkaSub creates a cluster consumer subscribed to the configured
// topic. It panics on failure because the job cannot operate without a
// Kafka subscription.
func newKafkaSub(c *conf.Kafka) *cluster.Consumer {
	config := cluster.NewConfig()
	// Surface consumer errors and rebalance notifications on their own
	// channels so Consume can log them.
	config.Consumer.Return.Errors = true
	config.Group.Return.Notifications = true
	consumer, err := cluster.NewConsumer(c.Brokers, c.Group, []string{c.Topic}, config)
	if err != nil {
		panic(err)
	}
	return consumer
}

// Run starts message consumption in a background goroutine and returns
// immediately. The goroutine exits when the consumer's message channel
// closes (see Close).
func (j *JobService) Run() {
	go j.Consume()
}

// NewConsul returns a Consul API client, panicking if the client
// cannot be constructed.
//
// NOTE(review): the agent address is hard-coded; it should come from
// configuration (conf.Consul) so deployments are not pinned to this
// host — confirm and wire through.
func NewConsul() *api.Client {
	config := api.DefaultConfig()
	config.Address = "192.168.101.170:8500"
	client, err := api.NewClient(config)
	if err != nil {
		panic(err)
	}
	return client
}

// watchComet subscribes to Consul service discovery for
// "private.comet" and refreshes the comet connection table whenever
// the registered instance set changes.
//
// NOTE(review): the watch goroutine has no cancellation path and runs
// for the life of the process; consider plumbing a context from Close.
func (j *JobService) watchComet(c *conf.Consul) {
	dis := NewConsul()
	r := consul.New(dis)
	w, err := r.Watch(context.Background(), "private.comet")
	if err != nil {
		// Without a watcher we cannot discover comets; log and bail
		// instead of dereferencing a nil watcher below.
		j.log.Errorf("watchComet Watch(private.comet) error(%v)", err)
		return
	}
	go func() {
		for {
			ins, err := w.Next()
			if err != nil {
				j.log.Errorf("watchComet Next() error(%v)", err)
				return
			}
			if err := j.newAddress(ins); err != nil {
				j.log.Errorf("watchComet newAddress error(%v)", err)
			}
		}
	}()
}

// newAddress reconciles the comet connection table against the latest
// set of registered instances: connections whose hosts are still
// registered are kept, new hosts are dialed, and connections whose
// hosts disappeared are cancelled.
//
// NOTE(review): cometServers is read/written here without a lock while
// this runs on the watch goroutine — confirm no other goroutine
// touches it concurrently.
func (j *JobService) newAddress(insMap []*registry.ServiceInstance) error {
	comets := make(map[string]*Comet, len(insMap))
	for _, in := range insMap {
		// Extract the host from the instance's grpc endpoint.
		host := ""
		for _, addr := range in.Endpoints {
			u, err := url.Parse(addr)
			if err == nil && u.Scheme == "grpc" {
				host = strings.Split(u.Host, ":")[0]
			}
		}
		// Reuse the existing connection when the host is unchanged.
		if old, ok := j.cometServers[host]; ok {
			comets[host] = old
			continue
		}
		c, err := NewComet(in, j.c.Comet)
		if err != nil {
			j.log.Errorf("watchComet NewComet(%+v) error(%v)", in, err)
			return err
		}
		comets[host] = c
		j.log.Infof("watchComet AddComet grpc:%+v", in)
	}
	// Tear down connections to hosts that are no longer registered.
	for key, old := range j.cometServers {
		if _, ok := comets[key]; !ok {
			old.cancel()
			j.log.Infof("watchComet DelComet:%s", key)
		}
	}
	j.cometServers = comets
	return nil
}

// Close releases resources held by the service, shutting down the
// Kafka consumer if one was created.
func (j *JobService) Close() error {
	if j.consumer == nil {
		return nil
	}
	return j.consumer.Close()
}

// Consume drains the consumer's error, notification, and message
// channels in a loop, unmarshaling each message into a pb.PushMsg and
// forwarding it to j.push. It returns when the message channel closes
// (i.e. after Close shuts the consumer down).
func (j *JobService) Consume() {
	for {
		select {
		case err := <-j.consumer.Errors():
			j.log.Errorf("consumer error(%v)", err)
		case n := <-j.consumer.Notifications():
			j.log.Infof("consumer rebalanced(%v)", n)
		case msg, ok := <-j.consumer.Messages():
			if !ok {
				// Channel closed: consumer was shut down.
				return
			}
			// Mark the offset up front so a malformed message is not
			// re-consumed on restart (matches original behavior).
			j.consumer.MarkOffset(msg, "")
			pushMsg := new(pb.PushMsg)
			if err := proto.Unmarshal(msg.Value, pushMsg); err != nil {
				j.log.Errorf("proto.Unmarshal(%v) error(%v)", msg, err)
				continue
			}
			if err := j.push(context.Background(), pushMsg); err != nil {
				j.log.Errorf("j.push(%v) error(%v)", pushMsg, err)
			}
			j.log.Infof("consume: %s/%d/%d\t%s\t%+v", msg.Topic, msg.Partition, msg.Offset, msg.Key, pushMsg)
		}
	}
}