/**
 * @Author: oldTea
 * @CreateTime: 2024-09-05
 * @Description: Kafka listener component (named "config", but it consumes records)
 * @Version: 1.0
 */
package com.ambition.demo.kafka.config;

import com.ambition.demo.kafka.dto.RecordKafkaDto;
import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Stopwatch;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;

import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.RestController;

import javax.annotation.PostConstruct;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * @Author: oldTea
 * @CreateTime: 2024-09-05
 * @Description: Kafka batch listener that collects record applications into Redis
 * @Version: 1.0
 */

@Component
@Slf4j
@EnableKafka
public class KafkaConfig {

    /** Jackson mapper used to deserialize each raw Kafka payload into a {@link RecordKafkaDto}. */
    @Autowired
    private ObjectMapper objectMapper;

    // NOTE(review): this class never sends through this template; the flush()
    // call in the listener below is likely a leftover — confirm before removing.
    // Raw type kept deliberately: parameterizing it changes Spring's
    // by-generics bean resolution, and the configured bean type is not visible here.
    @Autowired
    private KafkaTemplate kafkaTemplate;

    // NOTE(review): raw type kept for the same bean-resolution reason; ideally
    // this would be StringRedisTemplate once the configured bean is confirmed.
    @Autowired
    private RedisTemplate redisTemplate;

    /** Kafka consumer property key for disabling auto offset commit. */
    public static final String ENABLE_AUTO_COMMIT_CONFIG = "enable.auto.commit";

    /**
     * Batch listener for {@code dts_doublecheck_record_converge_topic}.
     *
     * <p>For each JSON payload, deserializes a {@link RecordKafkaDto}, extracts its
     * {@code application} field and adds it to the Redis set stored under the key
     * {@code "application"}. Records with a null/blank application are skipped so a
     * single bad payload cannot poison the whole batch. Offsets are committed
     * manually ({@code enable.auto.commit=false}); on any failure the entire batch
     * is negative-acknowledged and redelivered after 60 seconds.
     *
     * @param recordList batch of raw JSON payloads (up to max.poll.records = 1000)
     * @param ack        manual offset acknowledgment handle
     */
    @KafkaListener(id = "dtsDoublecheckRecordApplicationHandle",
            topics = "dts_doublecheck_record_converge_topic",
            groupId = "dtsDoublecheckRecordApplicationHandle", clientIdPrefix = "dtsDoublecheckRecordDistributeHandle",
            concurrency = "1", batch = "true", properties = {"max.poll.interval.ms: 60000",ENABLE_AUTO_COMMIT_CONFIG + ": false",
            "max.poll.records: 1000"}, autoStartup = "true")
    public void dtsDoublecheckRecordApplicationHandle(List<String> recordList, Acknowledgment ack) {
        Stopwatch started = Stopwatch.createStarted();
        try {
            for (String recordDto : recordList) {
                RecordKafkaDto recordKafkaDto = objectMapper.readValue(recordDto, RecordKafkaDto.class);
                String application = recordKafkaDto.getApplication();
                // Skip records without an application instead of NPE-ing and
                // nacking (and therefore endlessly redelivering) the whole batch.
                // The previous UTF-8 getBytes/new String round-trip was an identity
                // conversion and has been removed.
                if (StringUtils.isNotBlank(application)) {
                    redisTemplate.opsForSet().add("application", application);
                }
            }
            kafkaTemplate.flush();
        } catch (Exception e) {
            // log.error with the throwable already records the full stack trace;
            // printStackTrace() duplicated it to stderr and has been removed.
            log.error("双防排查记录dts服务kafka分发出错", e);
            try {
                // Negative-ack from index 0 of the batch; redeliver after 60 s.
                ack.nack(0, 1000 * 60);
            } catch (Exception exception) {
                log.error("双防排查记录dts服务kafka分发出错提nack出错", exception);
            }
            return;
        }

        long elapsed = started.elapsed(TimeUnit.MILLISECONDS);
        // The "not sender" list in the original was never populated, so the second
        // count was always recordList.size() - 0 and the third always 0.
        log.info("双防排查记录dts服务kafka分发成功,耗时毫秒:[{}],分发数量:[{}]危险源未找到companyCode的记录数量:[{}]",
                elapsed, recordList.size(), 0);
        // Manually commit the consumed offsets (auto-commit is disabled above).
        try {
            ack.acknowledge();
        } catch (Exception e) {
            log.error("双防排查记录dts服务kafka分发提交偏移量出错", e);
        }
    }
}
