package com.sunzm.spark.streaming

import org.apache.commons.lang3.time.DateFormatUtils
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.DStream

/**
 * Demo: shows, for each Spark Streaming micro-batch, how many partitions
 * the batch's RDD has.
 *
 * Reads comma-separated words from a TCP socket, computes a per-batch
 * word count, and prints the partition count plus the counts themselves.
 *
 * Usage: DStreamDemo [host] [port]
 * Defaults (backward compatible with the original hard-coded endpoint):
 * host = 82.156.210.70, port = 9999.
 */
object DStreamDemo {
  def main(args: Array[String]): Unit = {
    // Allow the socket endpoint to be overridden on the command line;
    // fall back to the original hard-coded values so existing invocations
    // keep working unchanged.
    val host = if (args.length >= 1) args(0) else "82.156.210.70"
    val port = if (args.length >= 2) args(1).toInt else 9999

    val conf = new SparkConf()
      // getSimpleName of a Scala object ends in '$'; strip it for a clean app name.
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      .setMaster("local[*]")

    // One micro-batch every 5 seconds.
    val ssc = new StreamingContext(conf, Durations.seconds(5))

    val lines = ssc.socketTextStream(host, port)

    // Per-batch word count: split each line on ',', emit (word, 1) pairs,
    // then reduce by key within the batch's RDD.
    val wordCounts: DStream[(String, Int)] = lines.transform { rdd =>
      rdd.flatMap(_.split(","))
        .map((_, 1))
        .reduceByKey(_ + _)
    }

    wordCounts.foreachRDD { (rdd, time) =>
      // `time` is the batch boundary timestamp; format it for the log lines below.
      val timeStr = DateFormatUtils.format(time.milliseconds, "yyyy-MM-dd HH:mm:ss")

      if (!rdd.isEmpty()) {
        // Number of partitions this batch's RDD was created with —
        // the quantity this demo exists to observe.
        val partitionsLength = rdd.partitions.length

        println(s"当前计算时间: ${timeStr}, 当前RDD的分区数： ${partitionsLength}")

        // NOTE: rdd.foreach runs on the executors; the output only shows up
        // on this console because the master is local[*].
        rdd.foreach(line => println(s"单词个数统计结果: ${line}"))
      } else {
        println(s"没有需要计算的数据: ${timeStr}")
      }
    }

    ssc.start()
    // Block the driver until the streaming job is stopped or fails.
    ssc.awaitTermination()
  }
}
