package com.kafka;

import com.util.CsvUtil;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.Arrays;
import java.util.Date;
import java.util.Properties;


/**
 * Receives data from Kafka (not used in this project).
 *
 * <p>Configuration file location: /usr/local/cp/cp_alarm_process/sys.properties
 * (local: F:/usr/local/cp/cp_alarm_process/sys.properties)
 *
 * @author admin
 */
public class ReceiveKafkaData {
    final static Logger logger = LoggerFactory.getLogger(ReceiveKafkaData.class);

    /** Kafka broker list, read from sys.properties ("bootstrap.servers"). */
    private final static String KAFKA_SERVER_IP = CsvUtil.getSysProperty("bootstrap.servers");

    /**
     * Consumer group id. A date-formatted string is used so each process start gets a fresh
     * group; combined with auto.offset.reset=latest, only records produced after startup
     * are consumed.
     */
    private final static String GROUP_ID =
            CsvUtil.formatDate(new Date(), CsvUtil.DATE_FORMAT7); // alternative: CsvUtil.getSysProperty("group.id.cp_alarm_process")

    /** Topics to subscribe to. */
    private final static String[] DEVICE_NO_LIST = new String[]{"site_data"};

    // NOTE(review): this was a *static* field assigned in the constructor, so constructing a
    // second instance would silently replace the consumer used by the first. It is private,
    // so making it a per-instance final field is interface-safe and removes that hazard.
    private final KafkaConsumer<String, String> consumer;

    /**
     * Builds the Kafka consumer from properties resolved via {@link CsvUtil}.
     * Performs no network I/O beyond what the Kafka client constructor itself does.
     */
    public ReceiveKafkaData() {
        logger.info("*********************开始初始化kafka消费者端*************************");
        Properties props = new Properties();
        props.put("bootstrap.servers", KAFKA_SERVER_IP);
        // Consumers that must each receive the same message need distinct group ids.
        props.put("group.id", GROUP_ID);
        // Auto-commit offsets, flushed once per second.
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        // Session timeout; past this the broker considers the consumer dead and rebalances.
        props.put("session.timeout.ms", "30000");
        // "latest": resume from committed offsets when present; otherwise consume only newly
        // produced records ("earliest" would instead replay each partition from the beginning).
        props.put("auto.offset.reset", "latest");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
    }

    /**
     * Subscribes to the configured topic(s) and polls forever, logging each record.
     *
     * <p>The per-record processing body is currently a placeholder (timing + logging only).
     * This method never returns and blocks the calling thread.
     */
    public void consume() {
        consumer.subscribe(Arrays.asList(DEVICE_NO_LIST));
        logger.info("开始消费topic: {}", DEVICE_NO_LIST[0]);
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                String key = record.key();
                logger.info("消息信息： offset = {}, key = {}", record.offset(), record.key());
                try {
                    long start = System.currentTimeMillis();

                    // TODO: actual record handling goes here.

                    logger.info("{}接受数据耗时：{}ms", key, System.currentTimeMillis() - start);
                } catch (Exception e) {
                    // Log with full stack trace (was printStackTrace()) and keep consuming;
                    // one bad record must not kill the poll loop.
                    logger.error("处理kafka消息失败, offset={}, key={}", record.offset(), key, e);
                }
            }
        }
    }

}
