package com.jenrey.spark.category;

import com.jenrey.hbase.dao.HBaseDao;
import com.jenrey.hbase.dao.factory.HBaseFactory;
import com.jenrey.utils.Utils;
import kafka.serializer.StringDecoder;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;

/**
 * Real-time category counter: consumes log lines from a Kafka topic via
 * Spark Streaming (direct, receiver-less connection), aggregates a running
 * count per category key within each 3-second batch, and persists the
 * aggregated counts to HBase.
 */
public class CategoryRealCount {
    public static void main(String[] args) {
        // Initialize the streaming entry point. Passing the SparkConf directly
        // lets JavaStreamingContext create its own SparkContext internally.
        SparkConf conf = new SparkConf();
        conf.setMaster("local");
        conf.setAppName("CategoryRealCount");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(3));
        ssc.checkpoint("/home/hadoop/checkpoint");

        // Kafka source configuration for the direct-stream consumer.
        HashMap<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("metadata.broker.list", "hadoop04:9092");
        HashSet<String> topics = new HashSet<>();
        topics.add("test");

        // createDirectStream yields (key, value) pairs where the key carries
        // offset metadata we do not need, so we immediately map to the value.
        KafkaUtils.createDirectStream(
                ssc,
                String.class,           // key type
                String.class,           // value type
                StringDecoder.class,    // key decoder
                StringDecoder.class,    // value decoder
                kafkaParams,
                topics
        ).map(new Function<Tuple2<String, String>, String>() {
            @Override
            public String call(Tuple2<String, String> tuple2) throws Exception {
                // Keep only the message payload; the offset key is unused.
                return tuple2._2;
            }
        })
                // Emit one (categoryKey, 1) pair per log line.
                .mapToPair(new PairFunction<String, String, Long>() {
                    @Override
                    public Tuple2<String, Long> call(String line) throws Exception {
                        return new Tuple2<String, Long>(Utils.getKey(line), 1L);
                    }
                }).reduceByKey(new Function2<Long, Long, Long>() {
            @Override
            public Long call(Long x, Long y) throws Exception {
                return x + y;
            }
        })
                // Persist each batch's aggregated counts into HBase.
                .foreachRDD(new VoidFunction<JavaPairRDD<String, Long>>() {
                    @Override
                    public void call(JavaPairRDD<String, Long> rdd) throws Exception {
                        rdd.foreachPartition(new VoidFunction<Iterator<Tuple2<String, Long>>>() {
                            @Override
                            public void call(Iterator<Tuple2<String, Long>> partition) throws Exception {
                                // Obtain the HBase connection on the executor, once per
                                // partition, so the DAO is never serialized with the closure.
                                HBaseDao hBaseDao = HBaseFactory.getHBaseDao();
                                while (partition.hasNext()) {
                                    Tuple2<String, Long> tuple = partition.next();
                                    hBaseDao.save("aura", tuple._1, "f", "name", tuple._2);
                                    System.out.println(tuple._1 + "   " + tuple._2);
                                }
                            }
                        });
                    }
                });

        // Start the streaming job and block until it terminates.
        ssc.start();
        try {
            ssc.awaitTermination();
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing it, so the
            // JVM/caller can still observe the interruption.
            Thread.currentThread().interrupt();
        }
        ssc.stop();
    }
}
