package com.shujia.streaming

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
 * Spark Streaming word-count job that saves every micro-batch result to text files.
 *
 * Reads lines from a socket (`master:12345`), splits them into words, counts
 * occurrences per 5-second batch, and rolls each batch's result out to a new
 * file under `spark/data/streams/`.
 */
object Demo7SaveToFile {
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local[2]") // at least 2 threads: one for the receiver, one for processing
      .appName("提交命令执行")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sparkContext: SparkContext = sparkSession.sparkContext
    // 5-second micro-batch interval
    val streamingContext = new StreamingContext(sparkContext, Durations.seconds(5))

    // Receiver-based source: each line arriving on the socket becomes one record.
    val linesDS: ReceiverInputDStream[String] = streamingContext.socketTextStream("master", 12345)

    val resultDS: DStream[(String, Int)] = linesDS
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      // transform runs once per batch on the driver; used here only to log batch boundaries
      .transform((rdd: RDD[(String, Int)]) => {
        println("=======================")
        println("正在处理批次数据")
        rdd
      })

    // The target path is a directory; file names are generated by the system,
    // only the suffix can be chosen. Each batch's result rolls into a new file.
    resultDS.saveAsTextFiles("spark/data/streams/stream", "txt")

    streamingContext.start()
    try {
      // Blocks until the context is stopped externally or the stream fails.
      streamingContext.awaitTermination()
    } finally {
      // Ensure the streaming context (and underlying SparkContext) is released
      // even when awaitTermination exits via an exception; previously stop()
      // was only reached on a clean return and resources could leak.
      streamingContext.stop()
    }
  }
}
