package com.spark.zhou.demo.streaming;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.Optional;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * @Description: Filters blacklisted users from a Kafka stream via the transform() operation
 * @Author: ZhOu
 * @Date: 2018/11/8
 */
public class TransFormDemo {

    /**
     * Consumes records from Kafka, keys each line by its first token (the user
     * name), and uses {@code transform} to left-outer-join every micro-batch
     * against a static blacklist RDD, dropping records whose user is flagged
     * {@code true} in the blacklist.
     *
     * @param args command-line arguments (unused)
     * @throws InterruptedException if interrupted while awaiting termination
     */
    public static void main(String[] args) throws InterruptedException {
        // local[2]: streaming jobs need more than one thread locally —
        // "local"/"local[1]" is explicitly discouraged by the Spark docs.
        SparkConf sparkConf = new SparkConf().setAppName("transformDemo").setMaster("local[2]");
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(10));

        // Build the blacklist: user name -> blocked flag (true = filtered out).
        List<Tuple2<String, Boolean>> list = Arrays.asList(new Tuple2<>("Tom", true),
                new Tuple2<>("Jack", true),
                new Tuple2<>("Lucy", false));
        JavaPairRDD<String, Boolean> blackListRDD = jssc.sparkContext().parallelizePairs(list);

        List<String> topics = Arrays.asList("test", "test1");
        Map<String, Object> kafkaParams = new HashMap<>(8);
        kafkaParams.put("bootstrap.servers", "node1:9092,node2:9092,node3:9092");
        kafkaParams.put("group.id", "test");
        // Auto-commit is disabled, so auto.commit.interval.ms would be ignored
        // by the consumer and is deliberately not set.
        kafkaParams.put("enable.auto.commit", false);
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("auto.offset.reset", "latest");

        // Receive live data via the direct (receiver-less) Kafka API.
        JavaInputDStream<ConsumerRecord<String, String>> inputDStream = KafkaUtils.createDirectStream(jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(topics, kafkaParams));

        // Key each record by the user name (first whitespace-separated token),
        // keeping the full line as the value. split(" ") always yields at
        // least one element, so values[0] is safe even for empty input.
        JavaPairDStream<String, String> pairDStream = inputDStream.mapToPair(record -> {
            String[] values = record.value().split(" ");
            return new Tuple2<>(values[0], record.value());
        });

        // Per batch, decide whether each user is on the blacklist.
        JavaDStream<String> result = pairDStream.transform(pairRecord -> {
            // Left outer join: users absent from the blacklist are retained
            // (their Optional flag is empty).
            JavaPairRDD<String, Tuple2<String, Optional<Boolean>>> joinRDD = pairRecord.leftOuterJoin(blackListRDD);

            // Keep a record unless it matched an entry whose flag is true.
            JavaPairRDD<String, Tuple2<String, Optional<Boolean>>> filterRecord = joinRDD.filter(record -> !(record._2._2.isPresent() && record._2._2.get()));

            // Project back to the original record text.
            return filterRecord.map(filter -> filter._2._1);
        });

        // Print the first ten elements of each batch.
        result.print();

        jssc.start();
        jssc.awaitTermination();
    }
}
