package com.bigdata.streaming

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
 * Streaming demo: reads comma-separated text from a socket, word-counts each
 * micro-batch, and rolls the results out to timestamped text directories on disk.
 *
 * NOTE(review): blocks forever on awaitTermination; the trailing stop() only
 * runs if the context is terminated externally.
 */
object Demo7SaveFIle {
  def main(args: Array[String]): Unit = {

    // Local session with 2 threads (one is consumed by the socket receiver);
    // shuffle parallelism kept at 1 for this small demo.
    val session: SparkSession = SparkSession
      .builder()
      .appName("ds")
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sparkContext: SparkContext = session.sparkContext

    // Micro-batch interval: 5 seconds.
    val streamingContext = new StreamingContext(sparkContext, Durations.seconds(5))

    // Newline-delimited text received from node01:8888.
    val lines: ReceiverInputDStream[String] = streamingContext.socketTextStream("node01", 8888)

    // Per-batch word count: split each line on commas, pair each token with 1,
    // then sum counts by token.
    val wordCounts: DStream[(String, Int)] = lines
      .flatMap(line => line.split(","))
      .map(word => (word, 1))
      .reduceByKey((left, right) => left + right)

    // Save each batch to disk; a new "data/stream-<batch-timestamp>.txt"
    // directory is generated per interval (rolling output).
    wordCounts.saveAsTextFiles("data/stream", "txt")

    // TODO: write the results out to Kafka.

    streamingContext.start()
    streamingContext.awaitTermination()
    streamingContext.stop()

  }

}
