package com.bs.kafka.handler;

import com.bs.kafka.service.TestService;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.*;
import org.springframework.stereotype.Component;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.BackOffExecution;
import org.springframework.util.backoff.FixedBackOff;

import javax.annotation.Resource;

/**
 * @author bingshao
 * @date 2022/10/14
 **/
@Component
@Log4j2
public class BusinessErrorHandler {


    @Resource
    private KafkaTemplate<String, Object> kafkaTemplate;

    @Resource
    private TestService testService;

//    @Bean("kafkaRetryHandler")
//    public KafkaListenerErrorHandler kafkaRetryHandler(KafkaTemplate<?, ?> template) {
//        return (ConsumerAwareListenerErrorHandler) (message, exception, consumer) -> {
//            log.error("异常异常=========>" + exception.getMessage());
//            // <1> 创建 DeadLetterPublishingRecoverer 对象
//            ConsumerRecordRecoverer recover = new DeadLetterPublishingRecoverer(template);
//            // <2> 创建 FixedBackOff 对象   设置重试间隔 1秒 次数为 3次
//            BackOff backOff = new FixedBackOff(1 * 1000L, 3L);
//            BackOffExecution execution = backOff.start();
//            ConsumerRecord record = (ConsumerRecord) message.getPayload();
//            while (true) {
//                long value = execution.nextBackOff();
//                if (value == BackOffExecution.STOP) {
//                    kafkaTemplate.send("test1.DLT", record.partition(), null, record.value());
//                    consumer.commitAsync();
//                } else {
//                    //在这里写你的重试逻辑
//                    try {
//                        int a = 100 / 0;
//                        System.out.println("简单消费1===Topic：" + record.topic() + "**分区" + record.partition() + "**值内容" + record.value() + "**位移" + record.offset());
//                        consumer.commitAsync();
//                    } catch (Exception e) {
//                        continue;
//                    }
//                }
//                break;
//            }
//            return null;
//        };
//    }

    @Bean("testTopicRetryHandler")
    public KafkaListenerErrorHandler testTopicRetryHandler() {
        return (ConsumerAwareListenerErrorHandler) (message, exception, consumer) -> {
            ConsumerRecord consumerRecord = (ConsumerRecord) (message.getPayload());
            int retryAttempts = 1;
            int maxRetryAttempts = 3;
            while (retryAttempts < maxRetryAttempts) {
                try {
                    testService.doSomeThing();
                    consumer.commitAsync((offsets, e) -> {
                        if (e == null) {
                            // 提交成功的处理逻辑
                            log.info("Offset commit successful for offsets: {}", offsets);
                        } else {
                            // 提交失败的处理逻辑
                            log.error("Offset commit failed for offsets: {}", offsets);
                        }
                    });
                } catch (Exception e) {
                    retryAttempts++;
                    if (retryAttempts == 3) {
                        // TODO 持久化消息
                        System.out.println("插入数据库信息" + consumerRecord.value().toString());
                        consumer.commitAsync((offsets, exp) -> {
                            if (exp == null) {
                                // 提交成功的处理逻辑
                                log.info("Offset commit successful for offsets: {}", offsets);
                            } else {
                                // 提交失败的处理逻辑
                                log.error("Offset commit failed for offsets: {}", offsets);
                            }
                        });
                    }
                }
            }
            return null;
        };
    }


}
