package com.alison.sink

import org.apache.spark.sql.SparkSession

object E3_sink_kafka {
  def main(args: Array[String]): Unit = {
    out_kafka()
  }

  /**
   * Streams word counts from a socket source to a Kafka topic.
   *
   * Reads text lines from a TCP socket, splits them into words, maintains a
   * running count per word, and writes each updated count as a "word,count"
   * string to Kafka. Blocks until the streaming query terminates.
   *
   * @param host             socket source host to read lines from
   * @param port             socket source port
   * @param bootstrapServers Kafka `bootstrap.servers` for the sink
   * @param topic            Kafka topic the counts are written to
   * @param checkpointDir    checkpoint location (mandatory for the Kafka sink)
   */
  def out_kafka(
      host: String = "192.168.56.104",
      port: Int = 9999,
      bootstrapServers: String = "localhost:9092",
      topic: String = "sourcev2-topic",
      checkpointDir: String = "file:///D:\\workspace\\lab\\learnbigdata\\learnspark\\structstream\\src\\main\\resources\\ck1"
  ): Unit = {
    // Create the SparkSession (local mode, all cores)
    val spark = SparkSession.builder()
      .appName("HelloStructuredStreaming")
      .master("local[*]")
      .getOrCreate()

    // Implicit conversions needed for .as[String] and the typed map below
    import spark.implicits._

    // Streaming DataFrame: one row per line received on the socket
    val lines = spark.readStream
      .format("socket")
      .option("host", host)
      .option("port", port)
      .load()

    // Running word count, serialized as "word,count".
    // The Kafka sink requires the payload in a column named "value".
    val words = lines.as[String]
      .flatMap(_.split("\\W+"))
      .groupBy("value")
      .count()
      .map(row => row.getString(0) + "," + row.getLong(1))
      .toDF("value")

    // Start the query, writing results to Kafka.
    // NOTE(review): the original also set a "path" option here; "path" belongs
    // to the file sink and is ignored by the Kafka sink, so it was removed.
    val query = words.writeStream
      .outputMode("update") // emit only rows whose count changed in the trigger
      .format("kafka")
      .option("kafka.bootstrap.servers", bootstrapServers)
      .option("topic", topic)
      .option("checkpointLocation", checkpointDir) // required: Kafka sink is fault-tolerant via checkpointing
      .start()

    // Block until the streaming query stops (failure or external stop)
    query.awaitTermination()

    // Shut down Spark
    spark.stop()
  }

}
