package com.hiscene.structurestream
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * root
 * |-- value: string (nullable = true)
 *
 * -------------------------------------------
 * Batch: 0
 * -------------------------------------------
 * +------+-----+
 * | value|count|
 * +------+-----+
 * | hello|    1|
 * | world|    1|
 * |hadoop|    2|
 * +------+-----+
 *
 * -------------------------------------------
 * Batch: 1
 * -------------------------------------------
 * +------+-----+
 * | value|count|
 * +------+-----+
 * | hello|    2|
 * | world|    2|
 * |hadoop|    4|
 * +------+-----+
 *
 *
 * Input: start a netcat server on the target host before running this app:
 * nc -lk 8866
 * hello world hadoop hadoop
 * hello world hadoop hadoop
 * hello world hadoop hadoop
 */
object SocketSream {

  /**
   * Structured Streaming word count over a TCP socket source.
   *
   * Reads lines from a netcat-style server, splits them on spaces and
   * prints a running word count to the console after every micro-batch.
   *
   * @param args optional overrides: args(0) = host, args(1) = port.
   *             Defaults to 192.168.1.23:8866 when not supplied, so the
   *             zero-argument invocation behaves exactly as before.
   */
  def main(args: Array[String]): Unit = {

    import org.apache.spark.sql.Dataset

    // Host/port were hard-coded; allow overriding from the command line
    // while keeping the original values as defaults. `lift` yields None
    // instead of throwing when the index is out of bounds.
    val host: String = args.lift(0).getOrElse("192.168.1.23")
    val port: Int    = args.lift(1).map(_.toInt).getOrElse(8866)

    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("StructuredNetworkWordCount")
      .getOrCreate()
    // Keep console output readable: suppress Spark's INFO/WARN chatter.
    spark.sparkContext.setLogLevel("ERROR")

    import spark.implicits._

    // Socket source: each row is one line of text in a column named "value".
    val lines: DataFrame = spark
      .readStream
      .format("socket")
      .option("host", host)
      .option("port", port)
      .load()

    // One input line fans out into one row per whitespace-separated word.
    val words: Dataset[String] = lines.as[String].flatMap(_.split(" "))

    words.printSchema()

    // Running aggregation over the whole stream so far.
    val wordCounts = words.groupBy("value").count()

    // "complete" mode re-emits the full aggregation table every batch,
    // which is why the counts in the header example grow across batches.
    val query = wordCounts.writeStream
      .outputMode("complete")
      .format("console")
      .start()

    // Block the driver until the query terminates (Ctrl-C or failure).
    query.awaitTermination()
  }
}
