package gbench.appdemo.mall.erp;

import static gbench.common.matlib.MatlibCanvas.println;
import static gbench.common.tree.LittleTree.IRecord.L;

import java.time.Duration;
import java.time.LocalDate;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import gbench.common.tree.LittleTree.IRecord;

class KafkaApp {

    /**
     * Create a KafkaApp against a local broker (127.0.0.1:9092) using the
     * "quickstart-events" demo topic.
     */
    public KafkaApp() {
        this("127.0.0.1:9092", "quickstart-events");
    }

    /**
     * Create a KafkaApp.
     *
     * @param mq_address_collection kafka broker address string, used as the
     *                              {@code bootstrap.servers} config value
     * @param topic                 default topic shared by Consumer / Producer / Admin
     */
    public KafkaApp(String mq_address_collection, String topic) {
        super();
        this.mq_address_collection = mq_address_collection;
        this.topic = topic;
    }

    /**
     * Kafka consumer wrapper: subscribes to the enclosing app's topic and
     * prints every record it polls.
     *
     * @author gbench
     */
    class Consumer {

        /**
         * Initialize the consumer: build the configuration, create the
         * {@link KafkaConsumer} and subscribe it to the app's topic.
         */
        public Consumer() {
            this.consumer_topic = KafkaApp.this.topic;
            final Properties configs = initialize();
            consumer = new KafkaConsumer<String, String>(configs);
            consumer.subscribe(Arrays.asList(this.consumer_topic));
        }

        /**
         * Build the consumer configuration from the fields of this class.
         *
         * @return the consumer {@link Properties}
         */
        public Properties initialize() {
            final Properties props = new Properties();
            props.put("bootstrap.servers", KafkaApp.this.mq_address_collection);
            props.put("group.id", this.consumer_group_id);
            props.put("enable.auto.commit", this.consumer_enable_auto_commit);
            props.put("auto.commit.interval.ms", this.consumer_auto_commit_intervals_ms);
            props.put("session.timeout.ms", this.consumer_session_timeout_ms);
            props.put("max.poll.records", this.consumer_max_poll_records);
            // start from the earliest offset when the group has no committed offset yet
            props.put("auto.offset.reset", "earliest");
            props.put("key.deserializer", StringDeserializer.class.getName());
            props.put("value.deserializer", StringDeserializer.class.getName());
            return props;
        }

        /**
         * Poll the topic in a loop and print every received record.
         * The loop runs until {@link #close()} is called from another thread;
         * the consumer is always closed when the loop exits, even on error.
         * (Previously the loop was unconditional and the consumer was never
         * closed, leaking its network resources.)
         */
        public void run() {
            try {
                while (running) {
                    final ConsumerRecords<String, String> records = consumer.poll(this.consumer_poll_time_out);
                    records.forEach((ConsumerRecord<String, String> record) -> {
                        println("key", record.key(), "value", record.value(), "topic", record.topic());
                    });
                }
            } finally {
                consumer.close(); // release sockets and buffers even on abnormal exit
            }
        }

        /**
         * Request the poll loop to stop; {@link #run()} itself closes the
         * consumer (KafkaConsumer is not thread-safe, so it must not be
         * closed from another thread).
         */
        public void close() {
            this.running = false;
        }

        private volatile boolean running = true; // poll-loop flag, cleared by close()
        private final String consumer_topic; // topic this consumer is subscribed to
        private final String consumer_group_id = "1"; // consumer group id; could be made configurable
        private final String consumer_enable_auto_commit = "true"; // auto-commit offsets (consumer side)
        private final String consumer_auto_commit_intervals_ms = "1000"; // auto-commit interval (ms)
        private final String consumer_session_timeout_ms = "30000"; // session timeout (ms)
        private final int consumer_max_poll_records = 10; // max records returned per poll
        private final Duration consumer_poll_time_out = Duration.ofMillis(3000); // poll timeout
        private final KafkaConsumer<String, String> consumer;
    }

    /**
     * Kafka producer wrapper bound to the enclosing app's topic.
     *
     * @author gbench
     */
    class Producer {

        /**
         * Initialize the producer: build the configuration and create the
         * {@link KafkaProducer}.
         */
        public Producer() {
            this.producer_topic = KafkaApp.this.topic;
            final Properties configs = initialize();
            producer = new KafkaProducer<String, String>(configs);
        }

        /**
         * Build the producer configuration.
         *
         * @return the producer {@link Properties}
         */
        Properties initialize() {
            final Properties props = new Properties();
            props.put("bootstrap.servers", KafkaApp.this.mq_address_collection);
            props.put("acks", "all"); // wait for the full ISR to acknowledge each record
            props.put("retries", 0);
            props.put("batch.size", 16384);
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", StringSerializer.class.getName());
            return props;
        }

        /**
         * Send a record to an explicit topic.
         * (Fixed: the {@code topic} parameter was previously ignored and the
         * record was always published to the default producer topic.)
         *
         * @param topic the topic to publish to
         * @param rec   the payload record; its JSON form is used as the message
         *              key and the current nanotime as the value.
         *              NOTE(review): this key/value layout is the reverse of
         *              {@link #send(IRecord)} — confirm it is intentional.
         */
        public void send(final String topic, final IRecord rec) {
            final var record = new ProducerRecord<String, String>(topic, rec.json(), System.nanoTime() + "");
            this.producer.send(record);
        }

        /**
         * Send a record to the default topic: key is today's date, value is
         * the record's JSON form.
         *
         * @param rec the payload record
         */
        public void send(final IRecord rec) {
            final var record = new ProducerRecord<String, String>(this.producer_topic, LocalDate.now() + "", rec.json());
            this.producer.send(record);
        }

        /**
         * Demo loop: send 10 records (key = today's date, value = nanotime)
         * to the default topic, then close the producer.
         *
         * @throws InterruptedException declared for caller compatibility
         */
        public void run() throws InterruptedException {
            for (int i = 0; i < 10; i++) {
                final var record = new ProducerRecord<String, String>(this.producer_topic,
                        LocalDate.now().toString(), System.nanoTime() + "");
                println(record);
                // asynchronous send with a completion callback
                producer.send(record, (RecordMetadata recordMetadata, Exception e) -> {
                    if (null != e) {
                        println("send error" + e.getMessage());
                    } else {
                        println(String.format("offset:%s,partition:%s", recordMetadata.offset(),
                                recordMetadata.partition()));
                    } // if
                });
            }
            producer.close(); // flushes pending records before returning
        }

        /**
         * Release the producer, flushing any buffered records.
         */
        public void close() {
            this.producer.close();
        }

        private final String producer_topic; // default topic this producer publishes to
        private final KafkaProducer<String, String> producer;
    }
    
    /**
     * Thin wrapper around the Kafka {@link AdminClient} for topic management.
     *
     * @author gbench
     */
    public class Admin {

        /**
         * Create and configure the underlying AdminClient.
         */
        public Admin() {
            this.initialize();
        }

        /**
         * Build the client configuration and create the AdminClient.
         */
        public void initialize() {
            final Properties properties = new Properties();
            properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, mq_address_collection);
            client = AdminClient.create(properties);
        }

        /**
         * Create the given topics (1 partition, replication factor 1),
         * skipping any that already exist. When called with no arguments the
         * enclosing app's default topic is created — this preserves the
         * previous behaviour, which always created the default topic and
         * ignored the varargs entirely (bug fixed here).
         *
         * @param topics the names of the topics to create
         */
        public void createTopic(final String... topics) {
            try {
                final var existing = client.listTopics().listings().get().stream()
                        .map(e -> e.name()).collect(Collectors.toSet());
                final var requested = topics.length > 0 ? topics : new String[] { topic };
                final var newTopics = Arrays.stream(requested)
                        .filter(t -> !existing.contains(t))
                        .map(t -> new NewTopic(t, 1, (short) 1))
                        .collect(Collectors.toList());
                if (!newTopics.isEmpty()) {
                    client.createTopics(newTopics);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }

        /**
         * Delete the given topics, ignoring names that do not exist.
         *
         * @param topics the names of the topics to delete
         */
        public void deleteTopics(final String... topics) {
            try {
                final var tps = L(topics);
                final var _topics = client.listTopics().listings().get().stream()
                        .map(e -> e.name()).filter(tps::contains).collect(Collectors.toList());
                if (_topics.size() > 0) {
                    client.deleteTopics(_topics);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }

        /**
         * List all topic names on the cluster.
         *
         * @return the topic names; empty when the lookup fails
         */
        public List<String> getTopics() {
            final var ll = new LinkedList<String>();
            try {
                final var dd = client.listTopics().listings().get().stream().map(e -> e.name());
                ll.addAll(dd.collect(Collectors.toList()));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }

            return ll;
        }

        /**
         * Release the AdminClient.
         */
        public void close() {
            this.client.close();
        }

        private AdminClient client; // created by initialize(); non-final because initialize() is public
    }

    public final String mq_address_collection; // kafka broker address string (bootstrap.servers)
    public final String topic; // default topic shared by Consumer / Producer / Admin

}