package com.shujia.spark.streaming

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.DStream

object Demo05ForeachRDD {

  /**
   * Spark Streaming demo: reads comma-separated lines from a socket (nc),
   * computes a per-batch word count with Spark SQL, and appends the result
   * to a MySQL table via JDBC.
   *
   * `foreachRDD` works like `transform` in that it exposes each micro-batch
   * as an RDD, but it is an output operation (no return value) — used here
   * as the sink for the JDBC write.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      // At least two threads are required: the socket receiver permanently
      // occupies one, leaving the other for processing.
      .master("local[2]")
      .config("spark.default.parallelism", "2")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    // StreamingContext needs a SparkContext plus a Duration (batch interval).
    val ssc: StreamingContext = new StreamingContext(spark.sparkContext, Durations.seconds(5))

    // Load data from an nc server over a socket connection.
    // Start the server side with: nc -lk 8888
    // Returns a DStream, Spark Streaming's programming model.
    val lineDS: DStream[String] = ssc.socketTextStream("master", 8888)

    import spark.implicits._
    import org.apache.spark.sql.functions._

    lineDS
      .foreachRDD(rdd => {
        // Guard against empty micro-batches: without this, every idle
        // 5-second batch would still run the pipeline and open a JDBC
        // connection just to write zero rows.
        if (!rdd.isEmpty()) {
          // A DStream cannot be saved to MySQL directly, so convert each
          // batch's RDD into a DataFrame first.
          val lineDF: DataFrame = rdd.map(s => Tuple1(s)).toDF("line")

          // Split each line on commas, explode into one word per row,
          // then count occurrences of each word within this batch.
          val wordCountDF: DataFrame = lineDF
            .select(explode(split($"line", ",")) as "word")
            .groupBy($"word")
            .agg(count("*") as "cnt")

          // Append this batch's counts to MySQL.
          // NOTE(review): credentials and the endpoint are hardcoded —
          // move them to configuration/args before any non-demo use.
          // NOTE(review): port 3307 is non-default for MySQL — confirm.
          wordCountDF
            .write
            .mode(SaveMode.Append)
            .format("jdbc")
            .option("url", "jdbc:mysql://rm-bp1y7dm47j8h060vy4o.mysql.rds.aliyuncs.com:3307/bigdata32")
            .option("dbtable", "bigdata32.word_cnt_zzk")
            .option("user", "bigdata32")
            .option("password", "123456")
            .save()
        }
      })

    // Start the streaming job and block until it is terminated.
    ssc.start()
    ssc.awaitTermination()

  }

}
