package com.kgc.bigdata.spark.streaming

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Word-count statistics over a socket stream, combining Spark Streaming
  * with Spark SQL for the per-batch aggregation.
  */
object NetworkSQLWordCount {

  /**
    * Entry point. Reads whitespace-separated words from a TCP socket and,
    * every 5-second batch, computes word frequencies with Spark SQL.
    *
    * Usage: NetworkSQLWordCount [host] [port]
    * Defaults ("localhost", 6789) match the original hard-coded values, so
    * existing invocations keep working unchanged.
    */
  def main(args: Array[String]): Unit = {
    // Optional command-line overrides; fall back to the original defaults.
    val host = if (args.length > 0) args(0) else "localhost"
    val port = if (args.length > 1) args(1).toInt else 6789

    val sparkConf = new SparkConf().setAppName("NetworkSQLWordCount").setMaster("local[2]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    val lines = ssc.socketTextStream(host, port)
    val words = lines.flatMap(_.split(" "))

    words.foreachRDD { rdd =>
      // isEmpty() only inspects partitions until it finds an element,
      // unlike count() which scans the entire RDD just to test emptiness.
      if (!rdd.isEmpty()) {

        // Obtain the lazily-created, shared SQLContext (one per driver JVM;
        // foreachRDD runs on the driver, so this is the standard pattern).
        val sqlContext = SQLContextSingleton.getInstance(rdd.sparkContext)
        import sqlContext.implicits._

        // Convert the RDD of raw words into a single-column DataFrame.
        val df = rdd.map(x => Word(x)).toDF
        df.registerTempTable("tb_word")

        // Run the word-frequency aggregation with Spark SQL.
        sqlContext.sql("select word, count(*) from tb_word group by word").show
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }

}

// Singleton holder that lazily creates and caches a single SQLContext.
object SQLContextSingleton {

  // Cached instance, wrapped in Option instead of a null-checked var.
  // @transient keeps it out of any serialized closure that captures this object.
  @transient private var cached: Option[SQLContext] = None

  /** Returns the process-wide SQLContext, creating it on first call. */
  def getInstance(sparkContext: SparkContext): SQLContext =
    cached.getOrElse {
      val ctx = new SQLContext(sparkContext)
      cached = Some(ctx)
      ctx
    }
}

// Case class modelling a single word (provides the DataFrame schema).
case class Word(word: String)

