package streaming.day01

import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import util.DBUtil

/**
  * Spark SQL inside Spark Streaming: reads lines from a socket, splits them
  * into words, counts each word per 2-second micro-batch via a SQL query,
  * and appends non-empty results to a JDBC table configured in [[DBUtil]].
  */
object WordCountSQL {
  def main(args: Array[String]): Unit = {
    // local[*]: use all available local cores. A streaming receiver needs at
    // least 2 (one to receive data, the rest to process batches).
    val conf = new SparkConf()
      .setAppName("WordCountSQL")
      .setMaster("local[*]")

    // Second argument is the batch interval: all data arriving within this
    // window forms one micro-batch. Pick it from workload measurements, not
    // guesswork; streaming intervals are usually short.
    val ssc = new StreamingContext(conf, Seconds(2))

    // Receive text lines from the socket source.
    // NOTE(review): host/port are hard-coded — consider reading them from args.
    val words: DStream[String] = ssc.socketTextStream("10.172.50.12", 44444)

    words.foreachRDD(rdd => {
      // Reuse (or lazily create) the singleton SparkSession for SQL on this batch.
      val spark: SparkSession = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()

      import spark.implicits._
      val wordsDataFrame = rdd.flatMap(line => line.split(" ")).toDF("words")

      // BUGFIX: the query below references tbl_words, but the DataFrame was
      // never registered as a view — without this line every batch fails with
      // an AnalysisException ("table or view not found: tbl_words").
      wordsDataFrame.createOrReplaceTempView("tbl_words")

      // Count occurrences of each word within the current batch.
      val result = spark.sql(
        """
          |select words, count(*) as total from tbl_words group by words
        """.stripMargin)

      // JDBC connection properties for the sink.
      val props = new Properties()
      props.setProperty("user", DBUtil.DB_USERNAME)
      props.setProperty("password", DBUtil.DB_PASSWD)

      // Skip the JDBC round-trip entirely for empty batches.
      if (!result.rdd.isEmpty()) {
        // Append this batch's counts to the configured database table.
        result.write.mode(SaveMode.Append).jdbc(DBUtil.DB_URL, DBUtil.DB_WORDCOUNT_TABLE, props)
      }
    })

    // BUGFIX: without start()/awaitTermination() the computation above is only
    // *defined*, never executed — main would return immediately.
    ssc.start()
    ssc.awaitTermination()
  }
}
