package SparkStreaming;

import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.util.HashMap;
import java.util.Map;


/**
 * Receiver-based Kafka source for Spark Streaming: subscribes to the
 * "useragent" topic and prints every received value to stdout.
 *
 * <p>Configuration (ZooKeeper quorum, consumer group id, batch interval)
 * comes from {@code StreamSource} constants.
 */
public class kafkaSourceInput  {

    private static final Logger LOG = LoggerFactory.getLogger(kafkaSourceInput.class);

    private transient JavaSparkContext javaSparkContext;
    private transient JavaStreamingContext javaStreamingContext;
    private transient JavaPairReceiverInputDStream<String, String> dataStream;


    public kafkaSourceInput(){
        initStream();
    }


    /**
     * Initializes the SparkContext/StreamingContext and creates the
     * receiver-based Kafka input stream.
     *
     * <p>The StreamingContext wraps the SparkContext and processes new data
     * once per batch interval ({@code StreamSource.KAFKA_BATCH_DURATION} ms).
     */
    public void  initStream(){

        // BUGFIX: "local" runs with a single thread, which the Kafka receiver
        // occupies entirely, leaving no thread to process batches. A
        // receiver-based stream needs at least local[2].
        SparkConf sparkConf = new SparkConf().setAppName("KafkaSourceInput").setMaster("local[2]");
        javaSparkContext = new JavaSparkContext(sparkConf);

        javaStreamingContext = new JavaStreamingContext(javaSparkContext,new Duration(StreamSource.KAFKA_BATCH_DURATION));

        String kafkaZkQuorum = StreamSource.KAFKA_ZKQUORUM;
        String groupId = StreamSource.KAFKA_GROUP_ID;

        // Parameterized SLF4J logging (the previous attempt had two args but
        // only one {} placeholder, dropping the group id).
        LOG.info("Kafka ZooKeeper quorum: {}, consumer group: {}", kafkaZkQuorum, groupId);

        // topic name -> number of receiver threads for that topic
        Map<String,Integer> topics = new HashMap<String, Integer>();
        topics.put("useragent",1);

        dataStream = KafkaUtils.createStream(javaStreamingContext,kafkaZkQuorum,groupId,topics);
    }

    /**
     * Registers the output operation: logs the per-batch record count and
     * prints each message value to stdout. Takes effect once the streaming
     * context is started.
     */
    public void compute(){
        dataStream.foreachRDD(new Function<JavaPairRDD<String, String>,Void>() {
            @Override
            public Void call(JavaPairRDD<String, String> rdd) throws Exception {
                long count = rdd.count();
                LOG.info("Received {} records in this batch", count);

                JavaRDD<String> values = rdd.values();

                values.foreach(new VoidFunction<String>() {
                    @Override
                    public void call(String s) throws Exception {
                        System.out.println(s);
                    }
                });
                return null;
            }
        });
    }


    /**
     * Entry point: wires up the stream, registers the output operation, and
     * starts the streaming context.
     */
    public static void main(String[] args) throws InterruptedException {

        kafkaSourceInput kafkaSourceInput = new kafkaSourceInput();

        // Reuse compute() instead of duplicating the foreach logic inline
        // (the old copy also used the deprecated DStream.foreach).
        kafkaSourceInput.compute();

        // BUGFIX: without start()/awaitTermination() the output operations are
        // registered but never executed — the program previously did nothing.
        kafkaSourceInput.javaStreamingContext.start();
        kafkaSourceInput.javaStreamingContext.awaitTermination();
    }
}
