package com.shujia.spark2

import java.util.concurrent.TimeUnit

import org.apache.spark.sql.streaming.{ProcessingTime, Trigger}
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}

/**
 * Structured Streaming word-count demo.
 *
 * Reads comma-separated words from a TCP socket (`node1:8888`, start the
 * server with `nc -lk 8888`), counts occurrences of each word across the
 * whole stream, and prints the running totals to the console every 5 seconds.
 */
object Demo3Stream {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("StructuredNetworkWordCount")
      // Keep shuffle parallelism at 1 — a single local partition is enough
      // for a demo and avoids creating 200 tiny tasks per micro-batch.
      .config("spark.sql.shuffle.partitions", "1")
      .master("local")
      .getOrCreate()

    import spark.implicits._

    // Unbounded DataFrame representing the stream of input lines arriving
    // on the socket source at node1:8888 (run `nc -lk 8888` there first).
    val lines: DataFrame = spark
      .readStream
      .format("socket")
      .option("host", "node1")
      .option("port", 8888)
      .load()

    // Split each line into words. `.as[String]` converts the single-column
    // DataFrame (default column name "value") into a typed Dataset[String].
    val words = lines
      .as[String]
      .flatMap(_.split(","))

    // Running word count: group by the "value" column and count per group.
    val wordCounts = words
      .groupBy("value")
      .count()

    // "complete" output mode re-emits the full updated result table each
    // trigger. Trigger.ProcessingTime replaces the ProcessingTime case
    // class, which has been deprecated since Spark 2.2.
    val query = wordCounts.writeStream
      .outputMode("complete")
      .format("console")
      .trigger(Trigger.ProcessingTime("5 seconds")) // micro-batch interval
      .start()

    // Block the main thread so the streaming query keeps running.
    query.awaitTermination()
  }
}
