package com.match.plugin.kafka;

import com.jfinal.kit.PropKit;
import com.jfinal.plugin.IPlugin;
import com.match.plugin.threadpool.ThreadPoolKit;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import java.util.Properties;
import java.util.Map.Entry;

/**
 * Wires up all Kafka producers and consumers used by the matching engine.
 *
 * Producers push order information out of the matching system:
 *   1. every individual trade produced during step one of matching
 *   2. orders changed after step one of matching completes
 *   3. changes to taken (filled-against) orders produced in step two
 *   4. cancellation request results
 *
 * Consumers receive orders entering the matching system:
 *   1. orders to be matched
 *   2. orders to be cancelled
 */
public class KafkaPlugin implements IPlugin {

    /**
     * Builds the shared client configuration, registers all producers and
     * consumers with {@link Kafka}, and immediately starts the consumer
     * loops on background threads.
     *
     * NOTE(review): all wiring happens here rather than in {@link #start()},
     * so consumers begin polling as soon as the plugin object is constructed.
     */
    public KafkaPlugin() {
        Properties props = new Properties();
        // Shared settings; one property set serves both producers and consumers
        // (each client type simply ignores the keys it does not use).
        props.put("bootstrap.servers", PropKit.get("kafkaServer"));
        props.put("key.serializer", "org.apache.kafka.common.serialization.LongSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.LongDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // batch.size=0 disables producer batching: every record is dispatched
        // immediately (lowest latency, lower throughput).
        props.put("batch.size", 0);

        // Consumers: orders entering the matching engine.
        // 1. orders to be matched
        KafkaConsumer<Long, String> matchConsumer = Kafka.addConsumer(Kafka.matchOrder, Kafka.matchOrder, props);
        // 2. orders to be cancelled
        KafkaConsumer<Long, String> revokeConsumer = Kafka.addConsumer(Kafka.revokeOrder, Kafka.revokeOrder, props);

        // Producers: results leaving the matching engine.
        // 1. per-trade match results
        Kafka.addProducer(Kafka.matchTrade, props);
        // 2./3. order state changes
        Kafka.addProducer(Kafka.orderChange, props);
        // 4. cancellation results
        Kafka.addProducer(Kafka.revokeResult, props);

        // 5. Trade depth is best-effort data, so fire-and-forget (acks=0) is
        // acceptable. Use an independent copy of the config so the shared
        // `props` object is not mutated (the previous code aliased `props`,
        // leaking the acks override into it), and use the modern "acks" key —
        // "request.required.acks" belongs to the legacy producer and is
        // ignored by org.apache.kafka.clients.producer.KafkaProducer.
        Properties depthProps = new Properties();
        depthProps.putAll(props);
        depthProps.put("acks", "0");
        Kafka.addProducer(Kafka.tradeDepth, depthProps);

        // Start consuming immediately on pooled background threads.
        ThreadPoolKit.submit(new Runnable() {
            public void run() {
                Kafka.matchOrderConsumer(matchConsumer);
            }
        });
        ThreadPoolKit.submit(new Runnable() {
            public void run() {
                Kafka.revokeOrderConsumer(revokeConsumer);
            }
        });
    }

    /**
     * Reports readiness. The actual client wiring already happened in the
     * constructor; this only logs how many producers/consumers registered.
     *
     * @return always {@code true} so JFinal considers the plugin started
     */
    @Override
    public boolean start() {
    	System.err.println("kafka消费生产开启! producer:"+Kafka.producerMap.size()+"个;consumer:"+Kafka.consumerMap.size()+"个;" );
        return true;
    }

    /**
     * Closes every registered producer and consumer. Each close is guarded so
     * one failing client cannot prevent the remaining clients from closing.
     *
     * NOTE(review): KafkaConsumer is not thread-safe; closing a consumer here
     * while its polling thread is still running may throw inside that thread —
     * confirm the consumer loops observe shutdown (e.g. via wakeup()).
     *
     * @return always {@code true} so JFinal considers the plugin stopped
     */
    @Override
    public boolean stop() {
    	System.err.println("销毁所有生产者和消费者开始");
        for (Entry<String, KafkaProducer<Long, String>> entry : Kafka.producerMap.entrySet()) {
            try {
                entry.getValue().close();
            } catch (Exception e) {
                e.printStackTrace(); // best-effort shutdown: keep closing the rest
            }
        }
        for (Entry<String, KafkaConsumer<Long, String>> entry : Kafka.consumerMap.entrySet()) {
            try {
                entry.getValue().close();
            } catch (Exception e) {
                e.printStackTrace(); // best-effort shutdown: keep closing the rest
            }
        }
        System.err.println("销毁所有生产者和消费者结束");
        return true;
    }
}
