package com.dxf.kafka.consumer;

import com.dxf.kafka.domain.KafkaMysql;
import com.dxf.kafka.domain.SysUser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;

import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/**
 * Standalone Kafka consumer that reads {@code KafkaMysql} change records from the
 * "dxf" topic and caches each contained {@code SysUser} in Redis as JSON under the
 * key {@code sysUser_<userId>}.
 *
 * <p>Runs an infinite poll loop; a failure to serialize or a null/empty record is
 * logged and skipped so a single bad message cannot kill the consumer.
 */
public class Consumer {
    private static final String TOPIC_NAME = "dxf";
    private static final String CONSUMER_GROUP_NAME = "testGroup";
    private static final Logger log = LoggerFactory.getLogger(Consumer.class);

    // ObjectMapper is thread-safe and expensive to build — cache one instance.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    public static void main(String[] args) {
        Properties properties = buildConsumerProperties();

        // try-with-resources: both clients are AutoCloseable, so they are released
        // if the poll loop ever exits with an exception (the original leaked both).
        try (Jedis jedis = new Jedis("192.168.10.113", 6379);
             KafkaConsumer<String, KafkaMysql> consumer = new KafkaConsumer<>(properties)) {

            consumer.subscribe(Arrays.asList(TOPIC_NAME));

            while (true) {
                ConsumerRecords<String, KafkaMysql> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, KafkaMysql> c : records) {
                    log.info("接受到的key值：{},接受到的value{},消费的分区：{}", c.key(), c.value(), c.partition());

                    KafkaMysql value = c.value();
                    // Guard against tombstone / malformed records: the original would
                    // NPE here and take the whole consumer down.
                    if (value == null || value.getData() == null) {
                        log.warn("Skipping record with no payload, partition={} offset={}", c.partition(), c.offset());
                        continue;
                    }

                    cacheUsers(jedis, value.getData());
                }
            }
        }
    }

    /**
     * Writes each user to Redis as JSON under {@code sysUser_<userId>}.
     * Serialization failures are logged (with cause) and the remaining users are
     * still processed — one bad row must not abort consumption.
     */
    private static void cacheUsers(Jedis jedis, List<SysUser> users) {
        for (SysUser user : users) {
            try {
                jedis.set("sysUser_" + user.getUserId(), objectMapper.writeValueAsString(user));
            } catch (JsonProcessingException e) {
                log.error("Failed to serialize user {} to JSON, skipping", user.getUserId(), e);
            }
        }
    }

    /** Builds the Kafka consumer configuration for this application. */
    private static Properties buildConsumerProperties() {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.10.113:9092,192.168.10.113:9093,192.168.10.113:9094");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, CONSUMER_GROUP_NAME);

        // Start from the latest offset when the group has no committed position.
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

        /*
         * Interval at which the consumer sends heartbeats to the broker. If a
         * rebalance is in progress the broker piggybacks the rebalance plan on the
         * heartbeat response, so this can be fairly short.
         */
        properties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000);

        /*
         * How long the broker waits without seeing a heartbeat before declaring the
         * consumer dead, evicting it from the group, and reassigning its partitions
         * to other consumers. Default is 10 seconds.
         */
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10 * 1000);

        // Max records returned per poll: raise for fast handlers, lower for slow ones.
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);

        /*
         * If the gap between two poll() calls exceeds this, the broker considers the
         * consumer too slow, kicks it out of the group, and reassigns its partitions.
         * Beware: a consumer that is always evicted may never make progress.
         */
        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 30 * 1000);

        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, UserDeserializer.class.getName());
        return properties;
    }
}
