package consumer

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/IBM/sarama"
	"github.com/zeromicro/go-zero/core/logx"
	"github.com/zeromicro/go-zero/core/stores/cache"
	"go.uber.org/zap"

	"grpc-common/album/types/album"
	"listenboos-common/redis"
	model_redis "listenboos-common/redis/model"
	"search/dao"
	"search/internal/config"
	"search/kafka/saramax"
)

// ProcessConsumer consumes album/track play-statistics messages from the
// "process" Kafka topic and applies the count deltas to both the redis
// album cache and the elasticsearch album index.
type ProcessConsumer struct {
	client      sarama.Client       // shared Kafka client; consumer group is created from it in Start
	redisClient *redis.RedisClient  // raw redis access, used for the SetNX dedup lock and direct key reads
	// NOTE(review): albumSearchDao is constructed with no config in
	// NewProcessConsumer — confirm dao.NewAlbumElasticDAO reads its own
	// connection settings.
	albumSearchDao *dao.AlbumElasticDAO // elasticsearch writes
	cache          cache.Cache          // go-zero cache wrapper over the same redis, used for Set with stats
}

// NewProcessConsumer wires up a ProcessConsumer: it keeps the supplied Kafka
// client and builds its own redis client, elasticsearch DAO, and a go-zero
// cache (with stats reported under the "album" name) from the given config.
func NewProcessConsumer(c config.Config,
	client sarama.Client,
) *ProcessConsumer {
	p := &ProcessConsumer{
		client:         client,
		redisClient:    redis.NewRedisClient(),
		albumSearchDao: dao.NewAlbumElasticDAO(),
	}
	p.cache = cache.New(c.CacheRedis, nil,
		cache.NewStat("album"), nil, func(o *cache.Options) {})
	return p
}
// Start creates a consumer group named "process" from the shared Kafka client
// and begins consuming the "process" topic on a background goroutine.
// It returns an error only if the consumer group cannot be created; once the
// goroutine is launched, Start returns nil immediately.
func (p *ProcessConsumer) Start() error {
	cg, err := sarama.NewConsumerGroupFromClient("process",
		p.client)
	if err != nil {
		zap.L().Error("KafkaAlbumAndTrackPlaySum系统kafka消息记录消费错误", zap.Error(err))
		return err
	}
	go func() {
		// Consume returns whenever a rebalance happens, so it must be called
		// in a loop to keep the consumer session alive (per the sarama
		// ConsumerGroup contract). The error is scoped to this goroutine:
		// the original wrote the outer `err` after Start had already
		// returned it, which was a data race.
		for {
			if err := cg.Consume(context.Background(),
				[]string{"process"},
				saramax.NewHandler[model_redis.KafkaAlbumAndTrackPlaySum](p.ProcessConsume)); err != nil {
				zap.L().Error("KafkaAlbumAndTrackPlaySum系统kafka消息记录消费错误", zap.Error(err))
				return
			}
		}
	}()
	return nil
}

// ProcessConsume handles a single play-statistics message: it deduplicates by
// business number, folds the count delta into the album snapshot cached in
// redis, and mirrors the same delta into the elasticsearch album index.
// msg is unused; the decoded payload arrives in t via saramax.NewHandler.
func (p *ProcessConsumer) ProcessConsume(msg *sarama.ConsumerMessage, t model_redis.KafkaAlbumAndTrackPlaySum) error {
	ctx := context.Background()

	// Idempotency guard: SetNX succeeds only for the first delivery of this
	// business number. The original passed a bare 20000, which go-redis
	// interprets as a time.Duration of 20000ns (~20µs) — far too short to
	// deduplicate anything. 20 seconds matches the apparent intent;
	// confirm against the producer's retry window.
	first, err := p.redisClient.Client.SetNX(ctx, t.BusinessNo, 1, 20*time.Second).Result()
	if err != nil {
		logx.Errorf("kafka process consume error: %v", err)
		return err
	}

	if first {
		key := fmt.Sprintf("albumInfo:%d", t.AlbumId)
		res := album.FindAlbumInfoResp{
			BaseCategoryView: &album.BaseCategoryView{},
			Announcer:        &album.Announcer{},
			AlbumStatVo:      &album.AlbumStatVo{},
			AlbumInfo:        &album.DetailResp{},
		}

		// Read the cached album snapshot straight from redis. (The previous
		// code also called p.cache.Get first, but both its result and its
		// error were discarded by this read, so that call was dead.)
		s, err := p.redisClient.Client.Get(ctx, key).Result()
		if err == nil {
			err = json.Unmarshal([]byte(s), &res)
		}
		if err != nil || res.AlbumInfo == nil || res.AlbumInfo.Id == 0 {
			// Missing or corrupt snapshot: drop the key so the read path
			// rebuilds it, and propagate the error. (When the snapshot parses
			// but has Id == 0, err may be nil — same as the original.)
			p.redisClient.Client.Del(ctx, key)
			logx.Errorf("kafka process consume error: %v", err)
			return err
		}

		// Fold the delta into the matching counter.
		switch t.StatType {
		case "playStatNum":
			res.AlbumStatVo.PlayStatNum += t.Count
		case "buyStatNum":
			res.AlbumStatVo.BuyStatNum += t.Count
		case "commentStatNum":
			res.AlbumStatVo.CommentStatNum += t.Count
		case "subscribeStatNum":
			res.AlbumStatVo.SubscribeStatNum += t.Count
		}

		// Replace the snapshot through the go-zero cache so its hit/miss
		// statistics stay accurate.
		p.redisClient.Client.Del(ctx, key)
		if err := p.cache.Set(key, &res); err != nil {
			logx.Errorf("更新redis error: %v", err)
			return err
		}
	}

	// Mirror the same delta into elasticsearch. This runs even when the
	// dedup lock was already held, matching the original control flow —
	// NOTE(review): that means a redelivered message updates ES twice;
	// confirm whether ES updates should also be gated on `first`.
	if err := p.albumSearchDao.UpdateAlbumCount(ctx, t.AlbumId, t.StatType, t.Count); err != nil {
		zap.L().Error("Consume update error:", zap.Error(err))
		return err
	}

	return nil
}
// NewProcessConsumers registers the given consumer in the package-level
// KafkaConsumers registry (declared elsewhere in this package) and returns
// the updated slice.
//
// NOTE(review): this mutates shared package-level state and is not
// concurrency-safe; calling it twice registers the consumer twice. Confirm
// it is only invoked once during dependency wiring.
func NewProcessConsumers(consumer *ProcessConsumer) []Consumer {
	KafkaConsumers = append(KafkaConsumers, consumer)
	return KafkaConsumers
}
