package com.sinux.generality.basesupport.utils.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.IntStream;

/**
 * Buffers Kafka records in an in-memory queue: a background poller thread
 * appends newly seen records, and {@link #removeRecord()} pops one record and
 * commits its offset.
 *
 * <p>Thread-safety: queue and consumer state are guarded by {@link #lock};
 * {@link #getQueue()} exposes the internal queue for size inspection only.
 */
public class QueueKafkaConsumerClass<K, V> {

    /** Records polled from Kafka and awaiting consumption; guarded by {@link #lock}. */
    private final Queue<ConsumerRecord<K, V>> queue = new LinkedList<>();

    /** Rebuilt per operation via ConsumerFactory; may be null before first use. */
    private KafkaConsumerInterface kafkaConsumerInterface = null;

    /** Guards {@link #queue}, {@link #partitions} and the consumer handle. */
    private final Lock lock = new ReentrantLock();

    private Collection<String> topic = null;

    private List<String> bootstrapServers = null;

    /** Partitions observed in the most recent poll; guarded by {@link #lock}. */
    private Set<TopicPartition> partitions = null;

    private String groupID = null;

    public QueueKafkaConsumerClass() {
        // Connection parameters are supplied later via addRecord(...).
    }

    /**
     * Background task: repeatedly polls Kafka, de-duplicates against the queue,
     * and appends unseen records. Sleeps 5s between polls; exits on a null poll
     * result or on interruption.
     */
    class ConsumerRunner implements Runnable {

        @Override
        public void run() {
            while (true) {
                lock.lock();
                try {
                    kafkaConsumerInterface =
                            ConsumerFactory.buildConsumerClassByBoostrapServers("default", bootstrapServers, groupID);
                    ConsumerRecords<K, V> records = kafkaConsumerInterface.poll(topic);
                    // Null-check BEFORE dereferencing: the original called
                    // records.partitions() first, so a null poll result would
                    // NPE before the guard could ever fire.
                    if (records == null) {
                        break;
                    }
                    partitions = records.partitions();
                    for (TopicPartition partition : partitions) {
                        System.out.println(partition);
                    }
                    for (ConsumerRecord<K, V> record : records) {
                        // Offsets are unique only within a partition, so the
                        // duplicate check must compare topic+partition+offset,
                        // not offset alone.
                        if (!isBuffered(record)) {
                            System.out.println("==================================================》" + record);
                            queue.offer(record);
                            System.out.println("==========================> 添加了之后数量 " + queue.size());
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    lock.unlock();
                    // Factory may have thrown before assigning — guard against NPE.
                    if (kafkaConsumerInterface != null) {
                        kafkaConsumerInterface.close();
                    }
                }
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop polling instead of
                    // swallowing the interrupt.
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }

        /** Returns true if an equal record (topic, partition, offset) is already queued. */
        private boolean isBuffered(ConsumerRecord<K, V> record) {
            for (ConsumerRecord<K, V> buffered : queue) {
                if (buffered.partition() == record.partition()
                        && buffered.offset() == record.offset()
                        && Objects.equals(buffered.topic(), record.topic())) {
                    return true;
                }
            }
            return false;
        }
    }

    /**
     * Stores the connection parameters and starts a single background poller.
     *
     * @param bootstrapServers Kafka bootstrap server addresses
     * @param groupID          consumer group id
     * @param topic            topics to subscribe to
     */
    public void addRecord(List<String> bootstrapServers, String groupID, Collection<String> topic) {
        this.bootstrapServers = bootstrapServers;
        this.groupID = groupID;
        this.topic = topic;
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(new ConsumerRunner());
        // No further tasks are submitted; shutdown() lets the worker thread
        // terminate when the poll loop exits instead of leaking forever.
        executor.shutdown();
    }

    /**
     * Removes the head record from the queue and commits its offset to the
     * partition it came from.
     *
     * @return the consumed record, or {@code null} if the queue is empty or an
     *         error occurred
     */
    public ConsumerRecord<K, V> removeRecord() {
        lock.lock();
        try {
            if (queue.isEmpty()) {
                return null;
            }
            kafkaConsumerInterface =
                    ConsumerFactory.buildConsumerClassByBoostrapServers("default", bootstrapServers, groupID);
            ConsumerRecord<K, V> record = queue.poll();
            // Commit only to the partition this record came from (the original
            // wrote the same offset to EVERY known partition), and commit
            // offset + 1: the committed offset is the position of the NEXT
            // record to read, so committing record.offset() would redeliver it.
            TopicPartition source = new TopicPartition(record.topic(), record.partition());
            kafkaConsumerInterface.commitSync(
                    topic, Collections.singletonMap(source, new OffsetAndMetadata(record.offset() + 1)));
            return record;
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        } finally {
            lock.unlock();
            // Factory may have thrown before assigning — guard against NPE.
            if (kafkaConsumerInterface != null) {
                kafkaConsumerInterface.close();
            }
        }
    }

    /**
     * Exposes the internal queue (live view, not a copy) — intended for size
     * inspection; callers must not mutate it.
     */
    public synchronized Queue<ConsumerRecord<K, V>> getQueue() {
        return queue;
    }

    /** Manual smoke test: polls localhost Kafka and drains one record every 5s. */
    public static void main(String[] args) {
        List<String> servers = new ArrayList<>();
        servers.add("127.0.0.1:9092");
        List<String> topics = new ArrayList<>();
        topics.add("test");
        QueueKafkaConsumerClass<String, String> test = new QueueKafkaConsumerClass<>();

        test.addRecord(servers, "test", topics);

        while (true) {
            try {
                Thread.sleep(5000);
                System.out.println("==========================> 消费之前数量 " + test.getQueue().size());
                test.removeRecord();
                System.out.println("==========================> 消费之后数量 " + test.getQueue().size());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}
