package springboot.kafka.controller;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.CopyOnWriteMap;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.util.CollectionUtils;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import springboot.kafka.dao.KafkaOffsetDao;
import springboot.kafka.entity.KafkaOffset;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;

@Slf4j
@RestController
@RequestMapping("/kafka")
public class KafkaController {

    /** Broker address used by both endpoints; hard-coded here — consider externalizing to application properties. */
    private static final String BOOTSTRAP_SERVERS = "192.168.182.14:9092";

    /** Topic both endpoints operate on. */
    private static final String TOPIC = "tsa-syslog";

    @Autowired
    private KafkaOffsetDao consumerOffsetDao;

    /**
     * Publishes {@code msg} to the {@code tsa-syslog} topic with a key derived from its hash.
     *
     * @param msg the payload taken from the URL path
     * @return the literal string "oks" once the record has been handed to the producer
     */
    @PostMapping("/send/{msg}")
    public String send(@PathVariable("msg") String msg) {
        String topic = TOPIC;
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, 1);
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // try-with-resources: the original leaked the producer if send() threw before close().
        // close() also flushes any buffered record before returning.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            producer.send(new ProducerRecord<>(topic, "key-" + msg.hashCode(), "val-" + msg));
        }
        log.info("oks");
        return "oks";
    }

    /**
     * Starts a manual-commit consumer loop on {@code tsa-syslog}, persisting the last offset
     * of every polled partition to the database so that a rebalance can resume from it.
     *
     * <p><b>NOTE(review):</b> the {@code while (true)} loop never exits, so this handler never
     * returns and permanently blocks the servlet request thread. Behavior is preserved here,
     * but this logic belongs in a background worker / {@code @KafkaListener}, not a controller.
     *
     * @return never returns normally (declared only to satisfy the original signature)
     */
    @PostMapping("/test")
    public String consumer() {
        String group = "zway";
        String topic = TOPIC;

        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        // offsets are committed manually after they have been persisted to the DB
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // BUG FIX: the original called subscribe() twice in a row; the first, listener-less
        // subscription was immediately replaced by this one, so only this call is needed.
        consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {

            // Before losing partitions: persist the current position of each one.
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> collection) {
                log.info("······方法嵌入中······");
                for (TopicPartition topicPartition : collection) {
                    commitOffset(group, topic, topicPartition.partition(), consumer.position(topicPartition));
                }
            }

            // After gaining partitions: seek each one to the offset persisted in the DB.
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> collection) {
                log.info("······分区处理······");
                for (TopicPartition topicPartition : collection) {
                    long offset = getConsumerOffset(group, topic, topicPartition.partition());
                    consumer.seek(topicPartition, offset); // resume from the persisted position
                }
            }
        });
        while (true) {
            // poll(Duration) replaces the deprecated poll(long) overload
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // offset entities of this poll, grouped by partition (keyed by its string form)
            Map<String, List<KafkaOffset>> byPartition = new HashMap<>();
            if (0 != records.count()) {
                log.info("处理的数据条数：" + records.count());
            }
            for (ConsumerRecord<String, String> consumerRecord : records) {
                String topics = consumerRecord.topic();
                int p = consumerRecord.partition();
                log.info("{}消费到消息：partition=" + topics + p + ",key=" + consumerRecord.key() + ",val=" + consumerRecord.value() + ",offset=" + consumerRecord.offset(), getThread());
                KafkaOffset offsetEntity = new KafkaOffset();
                offsetEntity.setConsumerGroup(group);
                offsetEntity.setTopic(consumerRecord.topic());
                offsetEntity.setConsumerPartition(consumerRecord.partition() + "");
                offsetEntity.setConsumerOffset(consumerRecord.offset() + "");
                offsetEntity.setCreateTime(new Date());
                // computeIfAbsent replaces the original's manual get / null-check / put dance
                byPartition.computeIfAbsent(offsetEntity.getConsumerPartition(), k -> new ArrayList<>())
                        .add(offsetEntity);
            }
            if (!CollectionUtils.isEmpty(byPartition)) {
                for (Map.Entry<String, List<KafkaOffset>> entry : byPartition.entrySet()) {
                    List<KafkaOffset> batch = entry.getValue();
                    // only the highest (last) offset per partition needs to be persisted
                    KafkaOffset kafkaOffset = batch.get(batch.size() - 1);
                    consumerOffsetDao.insert(kafkaOffset);
                    log.info("{}===偏移量提交成功===" + kafkaOffset, getThread());
                }
                // BUG FIX: the original called commitSync() once per partition inside the
                // loop, but each call already commits ALL offsets of the current poll.
                // A single commit after persisting every partition's offset is equivalent
                // and avoids redundant broker round-trips.
                consumer.commitSync();
            }
        }
    }

    /**
     * Looks up the most recently persisted offset for (group, topic, partition).
     *
     * @return the next offset to consume (stored offset + 1), or 0 when nothing is stored yet
     */
    private long getConsumerOffset(String group, String topic, int partition) {
        LambdaQueryWrapper<KafkaOffset> wrapper = new LambdaQueryWrapper<>();
        wrapper.eq(KafkaOffset::getConsumerGroup, group);
        wrapper.eq(KafkaOffset::getTopic, topic);
        wrapper.eq(KafkaOffset::getConsumerPartition, partition + "");
        // newest row first — list.get(0) below is the latest persisted offset
        wrapper.orderByDesc(KafkaOffset::getCreateTime);
        List<KafkaOffset> list = consumerOffsetDao.selectList(wrapper);
        if (CollectionUtils.isEmpty(list)) {
            log.info("{}>>>>>>>>>重新均衡分组<<<<<<<<<读取偏移量" + 0, getThread());
            return 0;
        }
        String offset = list.get(0).getConsumerOffset();
        log.info("{}>>>>>>>>>重新均衡分组<<<<<<<<<读取偏移量" + offset, getThread());
        // stored value is the last *consumed* offset, so resume one past it;
        // parseLong avoids the needless boxing of Long.valueOf
        return Long.parseLong(offset) + 1;
    }

    /**
     * Persists the given offset for (group, topic, partition) as a new row.
     * Rows are append-only; {@link #getConsumerOffset} picks the newest by create time.
     */
    private void commitOffset(String group, String topic, int partition, long offset) {
        KafkaOffset kafkaOffset = new KafkaOffset();
        kafkaOffset.setConsumerGroup(group);
        kafkaOffset.setTopic(topic);
        kafkaOffset.setConsumerPartition(partition + "");
        kafkaOffset.setConsumerOffset(offset + "");
        kafkaOffset.setCreateTime(new Date());
        consumerOffsetDao.insert(kafkaOffset);
        log.info("{}>>>>>>>>>重新均衡分组>>>>>>>>>提交偏移量", getThread());
    }

    /** @return the current thread's name, used as a log prefix. */
    private String getThread() {
        return Thread.currentThread().getName();
    }
}
