package com.tcm.kafka;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.List;
import java.util.*;
import java.util.stream.Collectors;


/**
 * Consumer-side interceptor that enforces a time-to-live (TTL) on records:
 * any record whose broker timestamp is older than {@link #EXPIRE_INTERVAL_MS}
 * at consume time is silently dropped before it reaches the application.
 * Committed offsets are echoed to stdout in {@link #onCommit(Map)}.
 *
 * <p>Note: dropping records here does not affect offset commits — the consumer
 * still advances past expired records, which is the intended TTL semantics.
 */
public class ConsumerInterceptorTTL implements ConsumerInterceptor<String, String> {

    /** Maximum record age in milliseconds; older records are filtered out. */
    private static final long EXPIRE_INTERVAL_MS = 10 * 1000L;

    /**
     * Filters the fetched batch, keeping only records younger than the TTL.
     *
     * @param consumerRecords the batch returned by the broker fetch
     * @return a new {@link ConsumerRecords} containing only non-expired records;
     *         partitions whose records all expired are omitted entirely
     */
    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> consumerRecords) {
        long now = System.currentTimeMillis();
        Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords = new HashMap<>();

        for (TopicPartition partition : consumerRecords.partitions()) {
            // Keep records whose age (now - timestamp) is still within the TTL window.
            List<ConsumerRecord<String, String>> freshRecords = consumerRecords.records(partition)
                    .stream()
                    .filter(record -> now - record.timestamp() < EXPIRE_INTERVAL_MS)
                    .collect(Collectors.toList());
            if (!freshRecords.isEmpty()) {
                newRecords.put(partition, freshRecords);
            }
        }
        return new ConsumerRecords<>(newRecords);
    }

    /**
     * Logs each committed offset to stdout.
     *
     * @param map committed offsets keyed by topic-partition
     */
    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> map) {
        map.forEach((top, off) -> {
            System.out.println("topic:" + top.topic() + "->offset:" + off.offset());
        });
    }

    /** No resources to release. */
    @Override
    public void close() {
        // intentionally empty
    }

    /** No configuration consumed; TTL is a compile-time constant. */
    @Override
    public void configure(Map<String, ?> map) {
        // intentionally empty
    }
}
