package com.hrt.iceberg.unauto

import java.util.concurrent.TimeUnit

import org.apache.spark.sql.streaming.{StreamingQuery, Trigger}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Structured Streaming job: continuously reads events from Kafka and appends
 * them to an Iceberg table via a Hadoop catalog.
 *
 * Note: Iceberg streaming writes produce many small files over time; run
 * Iceberg's data-file compaction (rewrite data files) action periodically.
 */
object StructuredStreamingIceberg {

  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession with an Iceberg Hadoop catalog named "hadoop_prod".
    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("SparkOperateIceberg")
      // Hadoop-type Iceberg catalog backed by an HDFS warehouse path.
      .config("spark.sql.catalog.hadoop_prod", "org.apache.iceberg.spark.SparkCatalog")
      .config("spark.sql.catalog.hadoop_prod.type", "hadoop")
      .config("spark.sql.catalog.hadoop_prod.warehouse", "hdfs://hadoop102:8020/sparkoperaticeberg")
      // Iceberg SQL extensions (needed e.g. for ALTER TABLE ... ADD PARTITION FIELD).
      .config("spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
      .getOrCreate()

    // 2. Create the target Iceberg table if it does not already exist.
    spark.sql(
      """
        | create table if not exists hadoop_prod.iceberg_db.iceberg_tbl(
        | current_day string,
        | user_id string,
        | page_id string,
        | channel string,
        | action string
        | ) using iceberg
      """.stripMargin)

    // 3. Read from Kafka as a streaming source.
    // FIX: Spark's Kafka source silently ignores the plain consumer properties
    // "auto.offset.reset" and "group.id". Offsets must be set via the source
    // option "startingOffsets", and a fixed consumer group (if required) via
    // "kafka.group.id".
    val df: DataFrame = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", "192.168.56.13:9092,192.168.56.14:9092,192.168.56.15:9092")
      .option("startingOffsets", "latest") // alternative: "earliest"
      .option("kafka.group.id", "iceberg-kafka-group-id")
      .option("subscribe", "kafka-iceberg-topic")
      .load()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // 4. Parse the tab-separated Kafka message value into the table's columns.
    //    Field layout by index: 0=current_day, 2=user_id, 3=page_id, 4=channel, 5=action.
    //    NOTE(review): index 1 is skipped — confirm this matches the producer's record format.
    val df2: DataFrame = df.selectExpr("cast(key as string)", "cast(value as string)")
      .as[(String, String)]
      .toDF("key", "data")
    // Hoist the split expression so it is defined once instead of per column.
    val fields = split(col("data"), "\t")
    val result: DataFrame = df2
      .withColumn("current_day", fields(0))
      .withColumn("user_id", fields(2))
      .withColumn("page_id", fields(3))
      .withColumn("channel", fields(4))
      .withColumn("action", fields(5))
      .select("current_day", "user_id", "page_id", "channel", "action")

    // 5. Append the stream to the Iceberg table in 10-second micro-batches.
    val query: StreamingQuery = result.writeStream
      .format("iceberg")
      .outputMode("append")
      .trigger(Trigger.ProcessingTime(10, TimeUnit.SECONDS))
      .option("path", "hadoop_prod.iceberg_db.iceberg_tbl") // "path" holds the target table identifier
      .option("fanout-enabled", "true") // keep per-partition writers open until the micro-batch completes
      .option("checkpointLocation", "hdfs://hadoop102:8020/structuredstreaming_checkpoint")
      .start()

    // Block the driver until the streaming query terminates.
    query.awaitTermination()
  }

}
