package util;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * Kafka helper utilities: a shared String/String producer plus factory
 * methods that build Spark Streaming direct streams for consuming topics.
 *
 * <p>The producer is created eagerly at class-load time and shared by all
 * callers; call {@link #close()} once on application shutdown.
 */
public class MyKafkaUtils {

    /** Shared producer instance, initialized once at class load. */
    private static KafkaProducer<String, String> producer = createProducer();

    /**
     * Builds the shared producer. Bootstrap servers come from the project
     * config file via {@code MyPropsUtils}; keys and values are plain strings.
     *
     * @return a configured {@link KafkaProducer}
     */
    private static KafkaProducer<String, String> createProducer() {
        Map<String, Object> producerConfigs = new HashMap<>();
        producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, MyPropsUtils.apply(MyConfig.KAFKA_BOOTSTRAP_SERVERS));
        producerConfigs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        producerConfigs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // acks=all: wait for the full ISR to acknowledge each write.
        producerConfigs.put(ProducerConfig.ACKS_CONFIG, "all");
        // Idempotent producer: prevents duplicates on broker-side retries
        // (implies retries > 0 and max.in.flight <= 5).
        producerConfigs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        return new KafkaProducer<>(producerConfigs);
    }

    /**
     * Sends a message with no key; the partition is chosen by the producer's
     * default (sticky) partitioning strategy.
     *
     * @param topic destination topic
     * @param msg   message value
     */
    public static void send(String topic, String msg) {
        producer.send(new ProducerRecord<>(topic, msg));
    }

    /**
     * Sends a keyed message; the partition is derived from the key's hash,
     * so messages with the same key land on the same partition.
     *
     * @param topic destination topic
     * @param key   partitioning key
     * @param msg   message value
     */
    public static void send(String topic, String key, String msg) {
        producer.send(new ProducerRecord<>(topic, key, msg));
    }

    /**
     * Closes the shared producer, flushing any buffered records first.
     * No further sends are possible afterwards.
     */
    public static void close() {
        producer.close();
    }

    /**
     * Forces any records buffered in the producer to be sent to the brokers
     * immediately, blocking until they complete.
     */
    public static void flush() {
        producer.flush();
    }

    /**
     * Populates the given map with the common consumer settings (servers,
     * String deserializers, offset policy) and returns it.
     *
     * @param kafkaPara map to populate (mutated in place)
     * @return the same map, for chaining
     */
    private static Map<String, Object> consumerConfigs(Map<String, Object> kafkaPara) {
        kafkaPara.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, MyPropsUtils.apply(MyConfig.KAFKA_BOOTSTRAP_SERVERS));
        kafkaPara.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaPara.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // NOTE(review): auto-commit "true" conflicts with the manual-offset
        // overload below — with Spark Streaming, offsets are usually committed
        // manually (enable.auto.commit=false) to get at-least-once semantics.
        // Left unchanged to preserve current behavior; confirm intent.
        kafkaPara.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        kafkaPara.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return kafkaPara;
    }

    /**
     * Creates a Spark Streaming direct stream for the given topic, starting
     * from the offsets implied by the consumer config (committed offsets if
     * present, otherwise "latest").
     *
     * @param jssc    the streaming context
     * @param topic   topic to subscribe to
     * @param groupId consumer group id
     * @return the Kafka input DStream
     */
    public static JavaInputDStream<ConsumerRecord<Object, Object>> getKafkaDStream(JavaStreamingContext jssc,
                                                                                   String topic, String groupId) {
        Map<String, Object> kafkaPara = consumerConfigs(new HashMap<>());
        kafkaPara.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return KafkaUtils.createDirectStream(jssc, LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(Collections.singleton(topic), kafkaPara));
    }

    /**
     * Creates a Spark Streaming direct stream for the given topic, starting
     * from the explicitly supplied per-partition offsets (used to resume a
     * job from offsets persisted externally, e.g. in Redis).
     *
     * @param jssc    the streaming context
     * @param topic   topic to subscribe to
     * @param groupId consumer group id
     * @param offsets starting offset per {@link TopicPartition}
     * @return the Kafka input DStream
     */
    public static JavaInputDStream<ConsumerRecord<Object, Object>> getKafkaDStream(JavaStreamingContext jssc, String topic, String groupId, Map<TopicPartition, Long> offsets) {
        Map<String, Object> kafkaPara = consumerConfigs(new HashMap<>());
        kafkaPara.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return KafkaUtils.createDirectStream(jssc, LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(Collections.singleton(topic), kafkaPara, offsets));
    }
}
