package com.alison.source

import org.apache.spark.sql.SparkSession

/** Structured Streaming source demos: the built-in "rate" source and the
  * "socket" text source. Each method builds its own SparkSession, runs a
  * console-sink streaming query, and blocks until the query terminates.
  */
object E2_Rate_source {

  def main(args: Array[String]): Unit = {
    // Switch between the two demos here; only one runs at a time because
    // each call blocks on awaitTermination().
    //    rate_source()
    source_socket()
  }

  /** Demo of the "rate" source: generates (timestamp, value) rows at a fixed
    * rate and prints each micro-batch to the console. Blocks forever until
    * the query is terminated externally.
    */
  def rate_source(): Unit = {
    // Create the SparkSession
    val spark = SparkSession.builder()
      .appName("E2_Rate_source")
      .master("local[*]")
      .getOrCreate()

    val rows = spark.readStream
      .format("rate")
      .option("rowsPerSecond", 3) // rows generated per second (default: 1)
      .option("rampUpTime", 1)    // seconds to ramp up to the target rate (default: 0)
      .option("numPartitions", 2) // partition count (default: Spark's default parallelism)
      .load()

    val query = rows.writeStream
      .outputMode("append")
      .format("console")
      .start()
    query.awaitTermination()
    spark.stop()
  }

  /** Demo of the "socket" source: streaming word count over lines read from a
    * TCP socket, printed to the console in "complete" output mode. Blocks
    * forever until the query is terminated externally.
    *
    * NOTE(review): the host/port are hard-coded; the socket server (e.g.
    * `nc -lk 9999`) must already be listening or the query fails on start.
    */
  def source_socket(): Unit = {
    val spark = SparkSession.builder().master("local[*]")
      .appName("socket")
      .getOrCreate()

    import spark.implicits._

    val lines = spark.readStream
      .format("socket")
      .option("host", "192.168.56.104")
      .option("port", 9999)
      .load()

    // Split each incoming line into words.
    // Streaming schema of `lines` (single string column):
    //   root
    //    |-- value: string (nullable = true)
    val words = lines.as[String].flatMap(_.split(" "))
    words.printSchema()

    // Group by word and count occurrences across the whole stream.
    val wordCounts = words.groupBy("value").count()

    // Start the query, printing full aggregated results each batch.
    val query = wordCounts.writeStream
      .outputMode("complete") // "complete" is required for streaming aggregations to the console
      .format("console")
      .start()

    query.awaitTermination()

    spark.stop()
  }

}
