package edu.csl.study.spark.basic

import edu.csl.study.spark.basic.SparkSQL.rootDir
import org.apache
import org.apache.spark
import org.apache.spark.sql.streaming.{StreamingQuery, Trigger}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

object StructuredStreaming {

  import java.io.File

  // Root directory for test files. File.separator keeps the path portable
  // across operating systems (the original hard-coded the Windows "\\",
  // which yields a broken path on Linux/macOS).
  val rootDir: String =
    System.getProperty("user.dir") + File.separator + "testFile" + File.separator

  /**
   * Entry point: runs a streaming word count over lines received from a
   * TCP socket and prints the running counts to the console every 3 seconds.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName("StructuredStreaming")
      .master("local[2]")
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    // Unbounded DataFrame of raw text lines from the socket source.
    val lines: DataFrame = source_Socket(spark)

    import spark.implicits._
    // Each line is a comma-separated list of words; split and count occurrences.
    val words: Dataset[String] = lines.as[String].flatMap(_.split(","))
    val wordCounts: DataFrame = words.groupBy("value").count()

    /**
     * Three output modes:
     *  - Complete: the full result table is emitted on every trigger; this
     *    mode requires an aggregation in the query.
     *  - Append: only newly appended rows are emitted; without watermarking
     *    no aggregation is allowed.
     *  - Update (Spark 2.1.1+): only rows updated since the last trigger are
     *    emitted; with no aggregation it behaves the same as Append.
     */
    val query: StreamingQuery = wordCounts.writeStream
      .outputMode("complete")
      .trigger(Trigger.ProcessingTime(3000)) // fire every 3 seconds
      .format("console")
      .start()

    // Block the main thread until the streaming query stops or fails.
    query.awaitTermination()
  }

  /**
   * Builds a streaming DataFrame backed by Spark's socket source.
   *
   * Test with: `nc -lk 8091` on the target host.
   *
   * @param spark active SparkSession
   * @param host  host to connect to (defaults to the original hard-coded address)
   * @param port  port to connect to (defaults to the original hard-coded port)
   * @return an unbounded DataFrame with a single `value: String` column,
   *         one row per line received on the socket
   */
  def source_Socket(spark: SparkSession,
                    host: String = "192.168.100.20",
                    port: Int = 8091): DataFrame = {
    spark.readStream
      .format("socket")
      .option("host", host)
      .option("port", port)
      .load()
  }

}
