package com.suncreate.commons;

import java.util.Collection;
import java.util.HashSet;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.suncreate.radarscan.model.RawRadarData;
import com.suncreate.radarscan.service.AlertEventDetectService;
import com.suncreate.util.JsonUtil;


/**
 * Consumer thread bound to a single partition of the "radardata" topic.
 * Each record is deserialized into a {@link RawRadarData} and pushed onto the
 * per-radar blocking queue in {@code AlertEventDetectService.rawDataQueueMap}.
 */
public class KafkaConsumerThread implements Runnable {
	// Per-instance logger (kept non-static to match original behavior).
    private Logger logger = LoggerFactory.getLogger(KafkaConsumerThread.class);

	private final KafkaConsumer<String, String> consumer;
	private final String radarNo;
	private final int partitionNum;

	/**
	 * Builds a consumer explicitly assigned to one partition of "radardata".
	 *
	 * @param partitionNum partition index this thread consumes from
	 * @param radarNo      radar serial number; used as the key into
	 *                     {@code AlertEventDetectService.rawDataQueueMap}
	 */
	public KafkaConsumerThread(int partitionNum, String radarNo) {
		this.radarNo = radarNo;
		this.partitionNum = partitionNum;
		Properties prop = createConsumerConfig();
		this.consumer = new KafkaConsumer<>(prop);

		// Restrict this consumer to partition `partitionNum` of topic "radardata"
		// via manual assignment (no consumer-group rebalancing).
		TopicPartition topicPartition = new TopicPartition("radardata", partitionNum);
		Collection<TopicPartition> collection = new HashSet<TopicPartition>();
		collection.add(topicPartition);
		this.consumer.assign(collection);

		// Test environment (topic not partitioned):
		// this.consumer.subscribe(Arrays.asList("radardata"));
	}

	/**
	 * Builds the Kafka consumer configuration.
	 * NOTE(review): broker addresses and group id are hard-coded — consider
	 * externalizing to configuration.
	 */
	private Properties createConsumerConfig() {
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.96.186:9092,192.168.96.186:9093,192.168.96.187:9092");
		props.put("group.id", "glzhang");
		props.put("enable.auto.commit", "true");
		props.put("auto.commit.interval.ms", "1000");
		props.put("session.timeout.ms", "30000");
		// props.put("auto.offset.reset", "earliest");
		props.put("auto.offset.reset", "latest");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		return props;
	}

	/**
	 * Polls the assigned partition forever, enqueueing each deserialized record.
	 * Exits (and closes the consumer) when the thread is interrupted.
	 */
	@Override
	public void run() {
		try {
			// FIX: loop condition honors interruption instead of `while (true)`,
			// so the thread is actually stoppable.
			while (!Thread.currentThread().isInterrupted()) {
				ConsumerRecords<String, String> records = consumer.poll(100);
				if (records.isEmpty()) {
					// Parameterized logging: no string concatenation cost when DEBUG is off.
					logger.debug("kafka上分区号为:{}的分区上没有数据,对应的雷达序列号为:{},线程Name为:{}",
							this.partitionNum, this.radarNo, Thread.currentThread().getName());
					continue;
				}
				for (ConsumerRecord<String, String> record : records) {
					// Deserialize the radar payload and hand it to the per-radar queue.
					try {
						RawRadarData rawRadarData =
								(RawRadarData) JsonUtil.getDTO(record.value(), RawRadarData.class);
						AlertEventDetectService.rawDataQueueMap.get(this.radarNo).put(rawRadarData);
						logger.debug("线程Name为:{},插入到缓存队列中的信息为：{}",
								Thread.currentThread().getName(), rawRadarData);
					} catch (InterruptedException ex) {
						// FIX: the original swallowed the interrupt and kept looping.
						// Restore the interrupt flag and stop consuming.
						Thread.currentThread().interrupt();
						logger.error("consumer thread interrupted while queueing radar data, radarNo={}",
								this.radarNo, ex);
						return;
					}
				}
			}
		} finally {
			// FIX: the original leaked the consumer; always release its
			// sockets/buffers when the loop exits for any reason.
			consumer.close();
		}
	}
}
