package com.spark_streaming.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.*;
import scala.Serializable;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

public class FFTKafkaMain {

    /**
     * Consumes a Kafka topic via the Spark Streaming direct API and hands each
     * micro-batch to {@link Handler#handler}. Offsets are committed manually to
     * Kafka only AFTER the batch has been processed, giving at-least-once
     * delivery semantics.
     */
    public static void main(String[] args) throws Exception {
        SparkConf conf = new SparkConf().setMaster("local[4]").setAppName("dec-kks-etl");
        // Finish in-flight batches before shutting down on SIGTERM.
        conf.set("spark.streaming.stopGracefullyOnShutdown", "true");
        conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        // Throttle to 1 record/partition/second — the downstream handler is
        // deliberately slow (see Handler), so keep batches tiny.
        conf.set("spark.streaming.kafka.maxRatePerPartition", "1");
        conf.set("spark.streaming.blockInterval", "1000");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(ConsumerConfig.GROUP_ID_CONFIG, "group_id_8");
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "bigdata-slave1.phmcluster.calabar:6667,bigdata-slave2.phmcluster.calabar:6667,bigdata-slave3.phmcluster.calabar:6667");
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Auto-commit is off: offsets are committed explicitly below, after
        // each batch is processed.
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        Collection<String> topics = Arrays.asList("dec-rds-vibration-p1-s1-test");
        // PreferConsistent: executors do not run on the Kafka broker hosts
        // (local[4] driver), so PreferBrokers would give no locality benefit.
        JavaInputDStream<ConsumerRecord<String, String>> stream =
                KafkaUtils.createDirectStream(
                        jssc,
                        LocationStrategies.PreferConsistent(),
                        ConsumerStrategies.<String, String>Subscribe(topics, config)
                );

        // Single output operation per batch: capture offset ranges, process the
        // records, THEN commit. (Previously the commit happened in a separate
        // foreachRDD from the processing, so offsets could be committed before
        // — or regardless of whether — processing succeeded, which defeats the
        // purpose of disabling auto-commit.)
        stream.foreachRDD(new VoidFunction<JavaRDD<ConsumerRecord<String, String>>>() {
            @Override
            public void call(JavaRDD<ConsumerRecord<String, String>> rdd) throws Exception {
                // Offset ranges are only available on the driver, before any
                // transformation — grab them first.
                OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
                for (OffsetRange o : offsetRanges) {
                    System.out.println(o.partition() + " " + o.fromOffset() + " " + o.untilOffset());
                }

                Handler.handler(rdd);

                // Commit to Kafka only after the batch completed without
                // throwing; commitAsync must be called on the driver.
                ((CanCommitOffsets) stream.inputDStream()).commitAsync(offsetRanges);
            }
        });

        jssc.start();
        jssc.awaitTermination();
    }
}

class Handler implements Serializable {
    /**
     * Prints the offset and value of every record in the batch, sleeping 10 s
     * per record to simulate a slow downstream sink (this is what the driver's
     * maxRatePerPartition throttle is tuned against).
     */
    public static void handler(JavaRDD<ConsumerRecord<String, String>> rdd) {
        rdd.foreach(new VoidFunction<ConsumerRecord<String, String>>() {
            @Override
            public void call(ConsumerRecord<String, String> record) throws Exception {
                String line = record.offset() + "," + record.value();
                System.out.println(line);
                // Deliberate 10 s pause per record — slow-sink simulation.
                Thread.sleep(10000);
            }
        });
    }
}