package org.shj.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import scala.collection.mutable.Queue
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import java.util.concurrent.Executors

/**
 * Streaming word-count demo: each micro-batch is converted to a DataFrame and
 * aggregated with Spark SQL from a separate (deliberately slow) thread.
 *
 * The SQL thread sleeps longer than the 5s batch interval, so the batch RDD
 * would normally be released before the query runs and the data would be lost.
 * `ssc.remember(Seconds(40))` keeps each RDD alive long enough for the delayed
 * query to see it.
 */
object Streaming2Dataset {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Streaming2Dataset")
    //conf.setMaster("local[*]")  // uncomment to run locally
    val ssc = new StreamingContext(conf, Seconds(5))
    ssc.sparkContext.setLogLevel("WARN")

    ssc.checkpoint("/tmp/checkpoint")
    // Retain each batch RDD for 40s so the delayed SQL thread (10s sleep) can
    // still read it after the 5s batch interval has elapsed.
    ssc.remember(Seconds(40))

    // Read text lines from a TCP socket (e.g. run `nc -lk 9999` on node1).
    val input = ssc.socketTextStream("node1", 9999)

    val wordsDs = input.flatMap(_.split(" "))

    wordsDs.foreachRDD { rdd =>
      // Guard against empty micro-batches: no need to register a temp view or
      // spawn a query thread when there is nothing to count.
      if (!rdd.isEmpty()) {
        // getOrCreate() reuses a single SparkSession across batches.
        val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
        import spark.implicits._

        val wordDataFrame = rdd.toDF("word")
        wordDataFrame.createOrReplaceTempView("wordtable")

        /**
         * The SQL runs on another thread. If the query takes longer than the
         * batch interval, the batch RDD may already have been released when the
         * query executes, losing data — unless ssc.remember(...) (set above)
         * retains the RDD long enough.
         */
        new Thread(new Runnable {
          override def run(): Unit = {
            Thread.sleep(10000) // simulate a long-running SQL query
            spark.sql("select word, count(word) as total from wordtable group by word").show()
          }
        }).start()
      }
    }

    ssc.start()
    // awaitTermination() blocks until the context is stopped elsewhere, so a
    // trailing ssc.stop() would be redundant/unreachable in normal operation.
    ssc.awaitTermination()
  }
}