package com.kafka.consumer;

import com.kafka.entity.ProcessedEventEntity;
import com.kafka.repository.ProcessedEventEntityRepository;
import org.apache.kafka.common.protocol.types.Field;
import org.example.ProductCreatedEvent;
import org.example.error.NotRetryableException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.client.RestTemplate;

import java.nio.charset.StandardCharsets;

/**
 * Kafka consumer that processes {@link ProductCreatedEvent} messages idempotently:
 * each message key is persisted via {@link ProcessedEventEntityRepository}, and any
 * message whose key was already stored is skipped as a duplicate.
 *
 * @author Administrator
 */
@Component
public class ConsumerListener {
    private static final Logger LOGGER = LoggerFactory.getLogger(ConsumerListener.class);

    // Set once in the constructor; records processed message ids for duplicate detection.
    private final ProcessedEventEntityRepository processedEventEntityRepository;

    public ConsumerListener(ProcessedEventEntityRepository repository) {
        this.processedEventEntityRepository = repository;
    }

    /**
     * Handles a {@code ProductCreatedEvent} from topic {@code huran-test} with at-most-once
     * processing semantics based on the record key: if the key is already present in the
     * database the message is treated as a duplicate and skipped; otherwise the event is
     * logged and the key is saved inside the surrounding transaction.
     *
     * <p>Notes on {@code max.poll.interval.ms}: it is the maximum time the consumer may
     * spend processing a returned batch between {@code poll()} calls. If the consumer does
     * not poll within that window, Kafka considers it dead and triggers a rebalance.
     * A larger value reduces unnecessary rebalances but delays failure detection; a smaller
     * value detects failures faster but may rebalance too often and hurt performance.
     * If this method runs longer than the configured interval, the partition is reassigned
     * to another consumer B, and once consumer A resumes polling the same message may be
     * consumed twice — which is exactly what the duplicate check below guards against.
     *
     * @param record    the deserialized event payload
     * @param key       the record key, used as the idempotency message id (may be null)
     * @param topic     source topic name
     * @param partition source partition (bound as String by header conversion)
     * @param ts        record timestamp (bound as String by header conversion)
     * @throws NotRetryableException if persisting the processed-event marker fails;
     *                               signals the error handler not to retry this record
     */
    @Transactional
    @KafkaListener(id = "three", clientIdPrefix = "three", topics = "huran-test", concurrency = "1", groupId = "ggg")
    public void handleGroup1(@Payload ProductCreatedEvent record,
                             @Header(name = KafkaHeaders.RECEIVED_KEY, required = false) String key,
                             @Header(name = KafkaHeaders.RECEIVED_TOPIC) String topic,
                             @Header(name = KafkaHeaders.RECEIVED_PARTITION) String partition,
                             @Header(name = KafkaHeaders.RECEIVED_TIMESTAMP) String ts
                          ) {

        // NOTE(review): key can be null (required = false); findByMessageId(null) behavior
        // depends on the repository implementation — confirm producers always set a key.
        ProcessedEventEntity byMessageId = processedEventEntityRepository.findByMessageId(key);
        if (byMessageId != null) {
            // Already processed: skip to keep consumption idempotent.
            LOGGER.warn("========================重复消费========================");
            return;
        }
        LOGGER.info("========================handleGroup1========================");
        // Parameterized logging avoids eager string concatenation when INFO is disabled.
        LOGGER.info("{}+收到消息:{}", Thread.currentThread().getName(), record.getTitle());
        LOGGER.info("{}+topic:{}", Thread.currentThread().getName(), topic);
        LOGGER.info("{}+partition:{}", Thread.currentThread().getName(), partition);
        LOGGER.info("{}+ts:{}", Thread.currentThread().getName(), ts);
        LOGGER.info("{}+key:{}", Thread.currentThread().getName(), key);

        LOGGER.info("========================handleGroup1========================");
        try {
            // Record the key so a redelivery of this message is detected as a duplicate.
            processedEventEntityRepository.save(new ProcessedEventEntity(key, record.getProductId()));
        } catch (Exception e) {
            // Persisting the marker failed; preserve the cause and tell the error
            // handler not to retry (e.g. route to a dead-letter topic instead).
            throw new NotRetryableException(e);
        }

    }

    /**
     * Demo: retrying on consumption failure.
     */
//    @KafkaListener(topics = "huran-test", concurrency = "3")
//    public void handle(ProductCreatedEvent record, Acknowledgment acknowledgment,
//                       @Header(name = KafkaHeaders.RECEIVED_KEY, required = false) String key,
//                       @Header(name = KafkaHeaders.RECEIVED_TOPIC) String topic,
//                       @Header(name = KafkaHeaders.RECEIVED_PARTITION) String partition,
//                       @Header(name = KafkaHeaders.RECEIVED_TIMESTAMP) String ts) {
//        LOGGER.info("=========================================================");
//        LOGGER.info(Thread.currentThread().getName() + "+收到消息:" + record.getTitle());
//        LOGGER.info(Thread.currentThread().getName() + "+topic:" + topic);
//        LOGGER.info(Thread.currentThread().getName() + "+partition:" + partition);
//        LOGGER.info(Thread.currentThread().getName() + "+ts:" + ts);
//        LOGGER.info(Thread.currentThread().getName() + "+key:" + key);
//        LOGGER.info("=========================================================");
//
//        acknowledgment.acknowledge();
//        String url = "http://localhost:8082";
//        try {
//            ResponseEntity<String> response = restTemplate.exchange(url, HttpMethod.GET, null, String.class);
//            if (response.getStatusCode().value() == HttpStatus.OK.value()) {
//                LOGGER.info("远程小消息响应:" + response.getBody());
//            }
//        } catch (ResourceAccessException e) {
//            System.out.println("===============>>>ResourceAccessException");
//            // acknowledgment.acknowledge();
//            LOGGER.error(e.getMessage());
//            throw new RetryableException(e);
//        } catch (HttpServerErrorException e) {
//            System.out.println("===============>>>HttpServerErrorException");
//            acknowledgment.acknowledge();
//            LOGGER.error(e.getMessage());
//            throw new NotRetryableException(e);
//        } catch (Exception e) {
//            LOGGER.error(e.getMessage());
//            throw new NotRetryableException(e);
//        }

    // }


//    /**
//     * Demo: no retry on failure — send straight to the dead-letter topic.
//     * @param record
//     */
//    @KafkaListener(topics = "huran-test")
//    public void handle(ProductCreatedEvent record) {
//        if (true) {
//            throw new NotRetryableException("不重试异常，放到死信队列");
//        }
//        LOGGER.info("收到方法:{}", record.getTitle());
//    }


//    @KafkaListener(topics = "huran-test")
//    public void handle(ProductCreatedEvent record, Acknowledgment acknowledgment) {
//
//        LOGGER.info("收到方法:{}", record.getTitle());
//        acknowledgment.acknowledge();
//    }
}

//    @KafkaListener(
//            id = "myConsumerId",
//            containerFactory = "kafkaListenerContainerFactory",
//            topics = "huran-test",
//            groupId = "your-group-id",
//            clientIdPrefix = "myClientPrefix",
//            concurrency = "3",
//            autoStartup = "true"
//    )
//    public void handle(List<ConsumerRecord<String, ProductCreatedEvent>> records, Acknowledgment acknowledgment) {
//        try {
//            for (ConsumerRecord<String, ProductCreatedEvent> record : records) {
//                String topic = record.topic();
//                int partition = record.partition();
//                long offset = record.offset();
//                ProductCreatedEvent event = record.value();
//                System.out.println(event);
//
//            }
//
//            // 批量处理成功后手动确认
//            acknowledgment.acknowledge();
//        } catch (Exception e) {
//            LOGGER.error("处理消息失败: {}", e.getMessage(), e);
//            // 处理失败时可以选择不确认消息，让Kafka重新发送
//        }
//    }
//}
