package com.zq.learn.spark.streaming;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.sql.sources.In;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.dstream.DStream;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

/**
 * StreamingTest
 *
 * @author ZhangQi
 * @date 2020/2/11
 */
public class StreamingTest {
  public static void main(String[] args) {
//    readSocketMsg();
    readKafkaMsg();
  }

  /**
   * Reads Kafka messages in 5-second micro-batches and counts word occurrences.
   *
   * <p>Three counting strategies are demonstrated (the last two are kept as
   * commented-out alternatives):
   * <ol>
   *   <li>per-batch: count word occurrences within each micro-batch;</li>
   *   <li>running total: stateful count, checkpointed every 60 seconds;</li>
   *   <li>windowed: every 5 seconds, aggregate the previous 15 seconds of data.</li>
   * </ol>
   */
  private static void readKafkaMsg() {
    SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("KafkaWordCount");
    JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));
    // Checkpointing requires a directory; usually an HDFS path is used, but a
    // local "checkpoint" directory is specified here for easy testing.
//    jssc.checkpoint("hdfs://localhost:8020/checkpoint");
    jssc.checkpoint("checkpoint");

    // Build the Kafka parameter map
    String brokers = "localhost:9092";
    String groupId = "testGroup";
    // Address list of the Kafka broker cluster to connect to
    Map<String, Object> kafkaParams = new HashMap<>();
    // Kafka listener endpoint(s)
    kafkaParams.put("bootstrap.servers", brokers);
    // Deserializer for record keys (strings, UTF-8 by default)
    kafkaParams.put("key.deserializer", StringDeserializer.class);
    // Deserializer for record values (strings, UTF-8 by default)
    kafkaParams.put("value.deserializer", StringDeserializer.class);
    // Consumer group id, arbitrary
    kafkaParams.put("group.id", groupId);
    // Where to start when no committed offset exists: "latest" (newest) or
    // "earliest" (oldest). Older clients used "largest"/"smallest" — not valid here.
    kafkaParams.put("auto.offset.reset", "latest");
    // If true, the consumer would periodically auto-commit each partition's
    // offset back to Kafka; disabled so Spark controls offset handling.
    kafkaParams.put("enable.auto.commit", false);

    // Build the topic set (comma-separated list of topic names)
    String topics = "topic1";
    Collection<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));

    // Create the direct Kafka input stream
    JavaInputDStream<ConsumerRecord<String, String>> lines =
        KafkaUtils.createDirectStream(
            jssc,
            LocationStrategies.PreferConsistent(),
            ConsumerStrategies.Subscribe(topicsSet, kafkaParams));

    // 1. Per-batch word count: split each record value on spaces, map each
    //    word to (word, 1), then sum the counts per word and print.
    JavaPairDStream<String, Integer> wordCounts =
        lines.flatMap(x -> Arrays.asList(x.value().split(" ")).iterator())
            .mapToPair(s -> new Tuple2<>(s, 1))
            .reduceByKey(Integer::sum);
    wordCounts.print();
    // 2. Running total across batches, checkpointed every 60 seconds
//    DStream<Tuple2<String, Object>> totalCount =
//      lines.flatMap(x -> Arrays.asList(x.value().toString().split(" ")).iterator())
//        .mapToPair(s -> new Tuple2<>(s, 1)).updateStateByKey((values, state) -> {
//        Integer updateValue = 0;
//        if (state.isPresent()) {
//          updateValue = Integer.parseInt(state.get().toString());
//        }
//        for (Integer val : values) {
//          updateValue += val;
//        }
//        return Optional.of(updateValue);
//      }).checkpoint(Durations.seconds(60));
//    totalCount.print();
    // 3. Windowed count: slide every 5 seconds over the previous 15 seconds of data
//    JavaPairDStream<String, Integer> partCount =
//      lines.flatMap(x -> Arrays.asList(x.value().toString().split(" ")).iterator())
//        .mapToPair(s -> new Tuple2<>(s, 1))
//        .reduceByKeyAndWindow((x, y) -> x + y, Durations.seconds(15), Durations.seconds(5));
//    partCount.print();

    jssc.start();
    try {
      jssc.awaitTermination();
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can still observe the interruption.
      Thread.currentThread().interrupt();
    } finally {
      jssc.close();
    }
  }

  /**
   * Reads lines from a socket and performs a simple word count.
   *
   * <p>Use {@code nc -lk 9999} to write strings to port 9999. Spark Streaming
   * listens on port 9999, splits each input line on spaces, and counts word
   * occurrences per 1-second batch.
   */
  public static void readSocketMsg() {
    /*
     * setMaster() sets the spark.master parameter.
     * "local[k]" runs in single-machine mode with k worker threads.
     */
    SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount");
    JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

    JavaReceiverInputDStream<String> lines = jssc.socketTextStream("localhost", 9999);
    JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(x.split(" ")).iterator());
    JavaPairDStream<String, Integer> pairs = words.mapToPair(s -> new Tuple2<>(s, 1));
    JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(Integer::sum);
    wordCounts.print();

    jssc.start();
    try {
      jssc.awaitTermination();
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can still observe the interruption.
      Thread.currentThread().interrupt();
    } finally {
      // Previously this context was never closed; release streaming resources
      // consistently with readKafkaMsg().
      jssc.close();
    }
  }
}
