package cn.wangjie.spark.monitor

import org.apache.spark.sql.streaming.StreamingQueryListener.{QueryProgressEvent, QueryStartedEvent, QueryTerminatedEvent}
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery, StreamingQueryListener, StreamingQueryProgress}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * 使用Structured Streaming从TCP Socket实时读取数据，进行词频统计，将结果打印到控制台。
 */
object StructuredMonitor {

	def main(args: Array[String]): Unit = {

		// 1. Build the SparkSession (local mode, 2 threads).
		val spark: SparkSession = SparkSession
			.builder()
			.appName(this.getClass.getSimpleName.stripSuffix("$"))
			.master("local[2]")
			// Keep the shuffle partition count small for a local demo.
			.config("spark.sql.shuffle.partitions", "2")
			.getOrCreate()
		// Bring in implicit encoders ($"...", .as[String], ...).
		import spark.implicits._

		// TODO: Register the StreamingQueryListener BEFORE starting the query,
		// otherwise the QueryStartedEvent for this query may be missed
		// (listeners only receive events fired after registration).
		spark.streams.addListener(new StreamingQueryListener {
			override def onQueryStarted(event: QueryStartedEvent): Unit = {
				println("Query started: " + event.id)
			}

			override def onQueryProgress(event: QueryProgressEvent): Unit = {
				println(s"Query Progress: ${event.progress}")

				val progress: StreamingQueryProgress = event.progress
				// sources may be empty for some progress events; use headOption
				// instead of sources(0) to avoid ArrayIndexOutOfBoundsException,
				// and actually print the offsets instead of discarding them.
				progress.sources.headOption.foreach { source =>
					println(s"Source startOffset: ${source.startOffset}")
					println(s"Source endOffset: ${source.endOffset}")
					println(source.prettyJson)
				}
			}

			override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {
				println("Query terminated: " + event.id)
			}
		})

		// 2. Read a stream of lines from a TCP socket.
		val inputStreamDF: DataFrame = spark
			.readStream
			.format("socket")
			.option("host", "node1.itcast.cn")
			.option("port", 9999)
			.load()

		// 3. Business logic: word count.
		// Sample input: "spark hadoop spark hadoop spark hive"
		val resultStreamDF: DataFrame = inputStreamDF
			// Convert DataFrame to a typed Dataset[String] for safe operations.
			.as[String]
			// Drop null/blank lines.
			.filter(line => null != line && line.trim.length > 0)
			// Split each line into words.
			.flatMap(line => line.trim.split("\\s+"))
			// Group by word and count occurrences.
			.groupBy($"value").count()

		// 4. Write the result table to the console and start the query.
		val query: StreamingQuery = resultStreamDF
			.writeStream
			// Complete mode: emit the entire result table on every trigger.
			.outputMode(OutputMode.Complete())
			.format("console")
			.option("numRows", "10")
			.option("truncate", "false")
			// Streaming queries must be explicitly started.
			.start()

/*
		// Alternative: poll the StreamingQuery API for status information.
		while(true){
			println(s"====================== ${System.nanoTime()} ==========================")
			println("Query Name: "+ query.name)
			println("Query ID: "+ query.id)
			println("Query RunID: "+ query.runId)
			println("Query IsActive: "+ query.isActive)
			println("Query Status: "+ query.status)
			println("Query LastProgress: "+ query.lastProgress)
			Thread.sleep(10 * 1000)
		}
*/

		// Block until the query terminates (normally runs until killed or it fails).
		// Note: a query.stop() after awaitTermination() would be dead code —
		// awaitTermination only returns once the query has already terminated.
		query.awaitTermination()
	}

}
