package com.kafka.dongli.consumer;

import com.kafka.dongli.model.User;
import com.kafka.dongli.util.JSONUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Headers;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.stereotype.Component;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

/**
 * Demonstrates the main Spring Kafka consumer styles: plain string payloads,
 * header extraction, partition-pinned listeners, JSON/entity payload
 * conversion, manual acknowledgement, batch consumption, and reply
 * forwarding via {@code @SendTo}.
 *
 * @author 橙哈哈
 * @since 2025-09-19
 */
@Component
public class EventConsumer {

    /**
     * Consumes plain string messages from {@code simple-topic}.
     *
     * <p>A {@code groupId} is mandatory — the listener fails to start without
     * one. In clustering mode, consumer instances sharing a group split the
     * topic's partitions, so each record is delivered to exactly one instance.
     *
     * @param event the raw message value
     */
    @KafkaListener(topics = { "simple-topic"}, groupId = "simple-group")
    public void simple(String event) {
        System.out.println("simple---接收到消息：" + event);
    }


    /**
     * Option 1: extract the payload and individual headers via parameter
     * annotations ({@code @Payload} / {@code @Header}).
     *
     * @param payload   the message body
     * @param topic     topic name taken from the record headers
     * @param partition partition the record was read from
     */
    @KafkaListener(topics = "message-topic", groupId = "message-group")
    public void consumeMessage(
            @Payload String payload,  // message body
            @Header(value = KafkaHeaders.RECEIVED_TOPIC) String topic, // topic name from headers
            // NOTE(review): RECEIVED_PARTITION_ID was renamed to RECEIVED_PARTITION in
            // Spring Kafka 3.0 — confirm which constant the project's spring-kafka version exposes.
            @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition // partition of the record
            //@Header(KafkaHeaders.OFFSET) long offset,  // record offset
            //@Header(KafkaHeaders.TIMESTAMP) long timestamp  // record timestamp
    ) {
        // Print everything we extracted.
        System.out.println("=== 收到Kafka消息 ===");
        System.out.println("消息体: " + payload);
        System.out.println("头信息中的主题名: " + topic);
        System.out.println("消息所在分区: " + partition);
        System.out.println("---------------------");
    }

    /**
     * Option 2: receive every header as a {@code Map} — useful when the header
     * names are not known in advance. (Listener currently disabled.)
     *
     * @param payload the message body
     * @param headers all record headers, keyed by header name
     */
    //@KafkaListener(topics = "message-topic", groupId = "message-group")
    public void consumeWithAllHeaders(
            @Payload String payload,
            @Headers Map<String, Object> headers  // map of all headers
    ) {
        System.out.println("=== 收到Kafka消息（全量头信息） ===");
        System.out.println("消息体: " + payload);
        System.out.println("所有头信息:");
        // Dump every header.
        headers.forEach((key, value) -> {
            System.out.println(key + ": " + value);
        });
        System.out.println("---------------------");
    }


    /**
     * Receives payload plus all headers, decoding custom (byte[]) header
     * values as UTF-8 strings.
     *
     * @param message    the message body
     * @param allHeaders all record headers, keyed by header name
     */
    @KafkaListener(topics = { "test-topic-Headers"}, groupId = "hello-group")
    public void onEvent04( @Payload String message,
                           @Headers Map<String, Object> allHeaders) {
        System.out.println("消息体: " + message);
        System.out.println("所有头信息:");

        // Walk every header entry.
        for (Map.Entry<String, Object> entry : allHeaders.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();

            // Custom headers arrive as byte arrays and need decoding;
            // built-in headers may already be other types (e.g. numbers).
            if (value instanceof byte[]) {
                System.out.println(key + ": " + new String((byte[]) value, StandardCharsets.UTF_8));
            } else {
                System.out.println(key + ": " + value);
            }
        }
        System.out.println("---------------------");
    }


    /**
     * Listens only on explicitly assigned partitions 0, 1 and 2 of
     * {@code partition-topic}.
     *
     * @param record the message body
     */
    @KafkaListener(groupId = "partition-group",
            topicPartitions = @TopicPartition(topic = "partition-topic", partitions = {"0", "1","2"}))
    public void partitionMessage(@Payload String record){
        System.out.println("指定分区的消息event:"+record);
    }

    /**
     * Listens only on partition 2. The assigned partitions must not exceed the
     * topic's configured partition count.
     *
     * @param event the message body
     */
    @KafkaListener( groupId = "partition-group-2",
            topicPartitions = @TopicPartition(topic = "partition-topic", partitions = {"2"})) // assigned partitions must exist on the topic
    public void partitionMessage02(@Payload String event){
        System.out.println("02--指定分区的消息event:"+event);
    }


    /**
     * Receives the payload already converted to a {@link User} by the
     * configured message converter (partition 0 of {@code domain-topic}).
     *
     * @param event the deserialized entity
     */
    @KafkaListener(groupId = "domain-group",
            topicPartitions = @TopicPartition(topic = "domain-topic", partitions = {"0"}))
    public void domain(@Payload User event){
        System.out.println("实体对象消息:"+event);
    }

    /**
     * Receives the raw JSON string (partition 1 of {@code domain-topic}) and
     * converts it to a {@link User} manually.
     *
     * @param event the JSON message body
     */
    @KafkaListener(groupId = "json-group",
            topicPartitions = @TopicPartition(topic = "domain-topic", partitions = {"1"}))
    public void domainJson(@Payload String event){
        User user = JSONUtils.toBean(event, User.class);
        System.out.println("json转实体对象消息:"+user);
    }



    /**
     * Manual-acknowledgement demo (listener currently disabled). The forced
     * {@code 10 / 0} simulates a business failure, so {@code acknowledge()}
     * is skipped and the record stays unacknowledged for redelivery.
     *
     * @param userJSON the message body as JSON
     * @param record   the full consumer record (resolved by type, no annotation needed)
     * @param ack      handle used to manually commit the offset
     */
    //@KafkaListener(topics = { "ack-topic"}, groupId = "${kafka.consumer.group}")
    public void handleAck(@Payload String userJSON,
                         ConsumerRecord<String, String> record,
                         Acknowledgment ack) {

        try {
            // Process the business logic for the received record.
            System.out.println("读取到的事件4：" + record.toString());

            // Deliberate failure to demonstrate the unacknowledged path.
            int a = 10 / 0;

            // Business processing done — confirm to the Kafka broker.
            // By default Kafka auto-commits; with manual ack mode this call commits the offset.
            ack.acknowledge();
        } catch (Exception e) {
            // Demo-only handling: the exception is logged and the record is NOT
            // acknowledged, so it will be redelivered on the next poll.
            e.printStackTrace();
        }
    }


    /**
     * Batch consumption: the container hands over a whole poll's worth of
     * records at once (requires a batch listener container factory).
     *
     * @param records the batch of consumer records
     */
    @KafkaListener(topics = {"batchTopic"}, groupId = "batchGroup2")
    public void onEvent(List<ConsumerRecord<String, String>> records) {
        System.out.println("批量消费，records.size() = " + records.size() + "，records = " + records);
    }


    /**
     * Consumes from {@code turn-topic-a} and forwards the returned value to
     * {@code turn-topic-b} via {@code @SendTo}.
     *
     * @param record the consumed record
     * @return the message forwarded to {@code turn-topic-b}
     */
    @KafkaListener(topics = {"turn-topic-a"}, groupId = "aGroup")
    @SendTo(value = "turn-topic-b")
    public String onEventA(ConsumerRecord<String, String> record) {
        System.out.println("消息A消费，records = " + record);
        return record.value() + "--forward message";
    }

    /**
     * Consumes the forwarded messages on {@code turn-topic-b}.
     *
     * @param record the consumed record
     */
    @KafkaListener(topics = {"turn-topic-b"}, groupId = "bGroup")
    public void onEventB(ConsumerRecord<String, String> record) {

        System.out.println("消息B消费，records = " + record);
    }



    /**
     * Consumes via a custom listener container factory (e.g. one configured
     * with interceptors or custom consumer properties).
     *
     * @param record the consumed record
     */
    @KafkaListener(topics = {"consumer-inter-topic"}, groupId = "intGroup", containerFactory = "ourKafkaListenerContainerFactory")
    public void onEvent(ConsumerRecord<String, String> record) {
        System.out.println("消息消费，records = " + record);
    }



}
