package org.huangrui.spark.scala.streaming

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.DStream

/**
 * Feed test input via netcat, e.g.:
 *   Linux/macOS: nc -lp 9999   (or nc -lk 9999 to keep listening across reconnects)
 *   Windows:     nc -L -p 9999
 *
 * @author hr
 * @since 2024-10-21 18:44
 */
object SparkStreaming04_Print {
  def main(args: Array[String]): Unit = {
    // Local streaming context with a 3-second batch interval.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))

    // Per-batch word counts from a socket text source.
    val wordCounts: DStream[(String, Int)] =
      ssc.socketTextStream("localhost", 9999)
        .flatMap(_.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)

    // A DStream wraps RDDs, but it does not re-expose every RDD method
    // (e.g. sortBy / sortByKey). When one of those is needed, drop down to
    // the underlying RDDs via foreachRDD. foreachRDD is itself an output
    // operation, satisfying the requirement that at least one be registered
    // before start() — otherwise Spark throws IllegalArgumentException:
    // "No output operations registered, so nothing to execute".
    // (print() would be the simpler output operation, analogous to an RDD
    // action; it is replaced here to demonstrate the RDD escape hatch.)
    wordCounts.foreachRDD { (rdd: RDD[(String, Int)]) =>
      println("----------------------------------------")
      println("Time :" + System.currentTimeMillis + " ms")
      println("----------------------------------------")
      rdd.collect.foreach(println)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
