package com.exchange.plugin.kafka;

import com.exchange.plugin.threadpool.ThreadPoolKit;
import com.jfinal.kit.PropKit;
import com.jfinal.plugin.IPlugin;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import java.util.Properties;
import java.util.Map.Entry;

/**
 * JFinal plugin that wires up the matching engine's Kafka producers and consumers.
 *
 * In this system:
 * 1. Producers push order information out of the matching engine:
 *    - every trade produced during matching step one
 *    - orders changed after matching step one completes
 *    - changes to taken (filled) orders produced in matching step two
 *    - the outcome of cancellation requests
 * 2. Consumers receive orders fed into the matching engine:
 *    - orders to be matched
 *    - orders to be cancelled
 */
public class KafkaPlugin implements IPlugin {

    /**
     * Builds the shared client configuration, registers all producers and
     * consumers with {@link Kafka}, and immediately starts one poll loop per
     * consumer on the shared thread pool.
     *
     * NOTE(review): all of this work happens in the constructor rather than in
     * {@link #start()}, so consumers begin polling before the plugin framework
     * reports the plugin as started — confirm this ordering is intentional.
     */
    public KafkaPlugin() {
        // One Properties object is shared by every producer and consumer, so it
        // carries both serializer and deserializer settings; each client type
        // simply ignores the keys it does not use.
        Properties props = new Properties();
        props.put("bootstrap.servers", PropKit.get("kafkaServer"));
        props.put("key.serializer", "org.apache.kafka.common.serialization.LongSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.LongDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // Producers: outbound channels for match results and revoke results.
        Kafka.addProducer(Kafka.matchOrder, props);
        Kafka.addProducer(Kafka.revokeOrder, props);

        // Consumers: one per inbound topic. The same constant is passed twice
        // — presumably group id and topic name; verify against Kafka.addConsumer.
        KafkaConsumer<Long, String> matchTradeConsumer = Kafka.addConsumer(Kafka.matchTrade, Kafka.matchTrade, props);
        KafkaConsumer<Long, String> orderChangeConsumer = Kafka.addConsumer(Kafka.orderChange, Kafka.orderChange, props);
        KafkaConsumer<Long, String> revokeResultConsumer = Kafka.addConsumer(Kafka.revokeResult, Kafka.revokeResult, props);
        KafkaConsumer<Long, String> tradeDepthConsumer = Kafka.addConsumer(Kafka.tradeDepth, Kafka.tradeDepth, props);

        // Start one poll loop per consumer. KafkaConsumer is not thread-safe,
        // so each instance must stay confined to the single worker thread
        // started for it here.
        //
        // NOTE(review): the original code handed the Kafka.orderChange consumer
        // to Kafka.revokeOrderConsumer and the Kafka.revokeResult consumer to
        // Kafka.orderChangeConsumer; that pairing is preserved below — confirm
        // the cross-wiring is intentional and not a copy/paste slip.
        ThreadPoolKit.submit(new Runnable() {
            public void run() {
                Kafka.matchOrderConsumer(matchTradeConsumer);
            }
        });
        ThreadPoolKit.submit(new Runnable() {
            public void run() {
                Kafka.revokeOrderConsumer(orderChangeConsumer);
            }
        });
        ThreadPoolKit.submit(new Runnable() {
            public void run() {
                Kafka.orderChangeConsumer(revokeResultConsumer);
            }
        });
        ThreadPoolKit.submit(new Runnable() {
            public void run() {
                Kafka.tradeDepthConsumer(tradeDepthConsumer);
            }
        });
    }

    /**
     * Logs how many producers and consumers were registered. All real setup
     * has already happened in the constructor; this method only reports.
     *
     * @return always {@code true}
     */
    @Override
    public boolean start() {
        System.err.println("kafka消费生产开启! producer:" + Kafka.producerMap.size()
                + "个;consumer:" + Kafka.consumerMap.size() + "个;");
        return true;
    }

    /**
     * Closes every registered producer and consumer.
     *
     * NOTE(review): KafkaConsumer is not thread-safe, and these consumers are
     * still being polled by the worker threads started in the constructor.
     * Calling close() from this thread can fail with
     * ConcurrentModificationException; the documented thread-safe way to stop
     * a polling consumer is consumer.wakeup() from here, with close() done on
     * the polling thread. Confirm how Kafka's poll loops terminate before
     * relying on this shutdown path.
     *
     * @return always {@code true}
     */
    @Override
    public boolean stop() {
        System.err.println("销毁所有生产者和消费者开始");
        for (Entry<String, KafkaProducer<Long, String>> entry : Kafka.producerMap.entrySet()) {
            entry.getValue().close();
        }
        for (Entry<String, KafkaConsumer<Long, String>> entry : Kafka.consumerMap.entrySet()) {
            entry.getValue().close();
        }
        System.err.println("销毁所有生产者和消费者结束");
        return true;
    }
}
