package com.borya.record.sender;

import java.util.Arrays;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import com.borya.constant.SystemConstant;
import com.borya.framework.annotation.Component;

@Component("kafkaRecordSender")
public class KafkaRecordSender implements RecordSender {

    /** Kafka bootstrap servers, "host:port" — comma-separated for a cluster. */
    private static final String BOOTSTRAP_SERVERS = SystemConstant.Kafka.BOOTSTRAP_SERVERS;
    /** Default topic this sender publishes to. */
    private static final String TOPIC_NAME = SystemConstant.Kafka.TOPIC;
    /** Consumer group id — deliberately reuses the topic name. */
    private static final String GROUP_ID = TOPIC_NAME;
    /** Topics the consumer subscribes to. */
    private static final String[] TOPIC_NAMES = {TOPIC_NAME};

    private final Producer<String, String> producer;
    private final Consumer<String, String> consumer;

    /**
     * Builds both the producer and the consumer.
     * <p>
     * Note: the Kafka producer cannot create topics from code; the topic must
     * already exist (created on the broker with the command-line tools).
     */
    public KafkaRecordSender() {
        Properties producerProps = new Properties();
        producerProps.put("bootstrap.servers", BOOTSTRAP_SERVERS);
        producerProps.put("acks", "all");
        // NOTE(review): retries=0 means any transient broker error drops the
        // record despite acks=all — confirm this trade-off is intentional.
        producerProps.put("retries", 0);
        producerProps.put("batch.size", 16384);
        producerProps.put("linger.ms", 1);
        producerProps.put("buffer.memory", 33554432);
        producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(producerProps);

        Properties consumerProps = new Properties();
        consumerProps.put("bootstrap.servers", BOOTSTRAP_SERVERS);
        consumerProps.put("group.id", GROUP_ID);
        // Where to start when the group has no committed offset: "latest" would
        // only receive new messages; "earliest" replays the topic from the
        // beginning, so a brand-new group processes everything already there.
        consumerProps.put("auto.offset.reset", "earliest");
        // Offsets are committed automatically, once per second.
        consumerProps.put("enable.auto.commit", "true");
        consumerProps.put("auto.commit.interval.ms", "1000");
        consumerProps.put("session.timeout.ms", "30000");
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(consumerProps);
    }

    /**
     * Sends a message to the default topic.
     *
     * @param key record key (used for partitioning)
     * @param msg record value; callers serialize objects to a JSON string,
     *            since Kafka stores values here as {@code String}
     */
    @Override
    public void send(String key, String msg) {
        send(TOPIC_NAME, key, msg);
    }

    /**
     * Sends a message to the given topic without a completion callback.
     *
     * @param topicName destination topic
     * @param key       record key
     * @param msg       record value (JSON string for object payloads)
     */
    public void send(String topicName, String key, String msg) {
        send(topicName, key, msg, null);
    }

    /**
     * Sends a message to the given topic, optionally notifying a callback
     * when the broker acknowledges (or rejects) the record.
     *
     * @param topicName destination topic
     * @param key       record key
     * @param msg       record value
     * @param callback  completion callback; may be {@code null} for fire-and-forget
     */
    public void send(String topicName, String key, String msg, FutureCallback callback) {
        if (Objects.isNull(callback)) {
            producer.send(new ProducerRecord<String, String>(topicName, key, msg));
        } else {
            producer.send(new ProducerRecord<String, String>(topicName, key, msg), callback);
        }
    }

    /**
     * Subscribes to {@link #TOPIC_NAMES} and polls forever, handing every
     * received record to {@code process}. This call blocks the current thread
     * and never returns; when a poll comes back empty, the thread parks for
     * 2 seconds before polling again.
     * <p>
     * NOTE(review): {@code KafkaConsumer} is not thread-safe — do not invoke
     * {@link #closeConsumer()} from another thread while this loop is running.
     *
     * @param process callback invoked once per received record
     */
    public void onReceiver(ReceiveProcess process) {
        consumer.subscribe(Arrays.asList(TOPIC_NAMES));
        while (true) {
            /* Poll with a 100 ms read timeout. */
            ConsumerRecords<String, String> records = getKafkaConsumer().poll(100);
            if (records.isEmpty()) {
                // Nothing arrived; back off briefly before the next poll.
                LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(2));
                continue;
            }
            for (ConsumerRecord<String, String> record : records) {
                process.onProcess(record);
            }
        }
    }

    /** @return the underlying consumer (not thread-safe; see {@link #onReceiver}). */
    public Consumer<String, String> getKafkaConsumer() {
        return consumer;
    }

    /** Closes both the producer and the consumer. */
    @Override
    public void close() {
        closeProducer();
        closeConsumer();
    }

    /** Closes the producer, flushing buffered records. */
    public void closeProducer() {
        producer.close();
    }

    /** Closes the consumer. Must not race an active {@link #onReceiver} loop. */
    public void closeConsumer() {
        consumer.close();
    }

    public static void main(String[] args) {
        // Intentionally empty; kept as a manual smoke-test entry point.
    }
}