package com.hrt.iceberg.unauto

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  *  Spark 整合Iceberg 写操作
 *   merge into |insert |静态分区 动态分区覆盖|update |delete | dataframe写入iceberg表
  */
/**
  * Spark-on-Iceberg write-operation examples:
  * merge into | insert | static & dynamic partition overwrite | update | delete | DataFrame write.
  *
  * Only the DataFrame-write example is active; the remaining examples are kept
  * commented out so they can be enabled one at a time against the same catalog.
  */
object SparkIceberg4 {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().master("local").appName("SparkOperateIceberg")
      // Register a Hadoop catalog named "hadoop_prod" whose warehouse lives on HDFS.
      .config("spark.sql.catalog.hadoop_prod", "org.apache.iceberg.spark.SparkCatalog")
      .config("spark.sql.catalog.hadoop_prod.type", "hadoop")
      .config("spark.sql.catalog.hadoop_prod.warehouse", "hdfs://mycluster/sparkoperateiceberg")
      // Iceberg SQL extensions are required for MERGE INTO / UPDATE / DELETE syntax.
      .config("spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
      .getOrCreate()

    // Ensure the session is always stopped, even if a write fails
    // (the original version leaked the SparkSession on every run).
    try {
      /**
        * Write a DataFrame into an Iceberg table.
        */
      val nameJsonList = List[String](
        """{"id":1,"name":"zs","age":18,"loc":"beijing"}""",
        """{"id":2,"name":"ls","age":19,"loc":"shanghai"}""",
        """{"id":3,"name":"ww","age":20,"loc":"beijing"}""",
        """{"id":4,"name":"ml","age":21,"loc":"shanghai"}""")

      import spark.implicits._
      val df: DataFrame = spark.read.json(nameJsonList.toDS)

      // Iceberg requires data to be sorted/clustered by the partition column
      // before a partitioned create, hence sortWithinPartitions on "loc".
      df.sortWithinPartitions($"loc")
        .writeTo("hadoop_prod.default.df_tbl2")
        .partitionedBy($"loc")
        .create()

      spark.read.table("hadoop_prod.default.df_tbl2").show()

      // Unpartitioned variant of the same DataFrame write.
//    df.writeTo("hadoop_prod.default.df_tbl1").create()
//
//    spark.read.table("hadoop_prod.default.df_tbl1").show()

      /**
        * UPDATE example.
        */
//    spark.sql(
//      """
//        |create table hadoop_prod.default.update_tbl (id int,name string,age int) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.update_tbl values (1,"zs",18),(2,"ls",19),(3,"ww",20),(4,"ml",21),(5,"tq",22),(6,"gb",23)
//      """.stripMargin)

      // update
//    spark.sql(
//      """
//        |update hadoop_prod.default.update_tbl set name = 'xxx' ,age = 100
//        |where id <=3
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.update_tbl
//      """.stripMargin).show()

      /**
        * DELETE FROM example: remove rows by predicate.
        */
//    spark.sql(
//      """
//        |create table hadoop_prod.default.delete_tbl (id int,name string,age int) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.delete_tbl values (1,"zs",18),(2,"ls",19),(3,"ww",20),(4,"ml",21),(5,"tq",22),(6,"gb",23)
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.delete_tbl
//      """.stripMargin).show()

      // delete from
//    spark.sql(
//      """
//        | delete from hadoop_prod.default.delete_tbl where id >=3 and id <6
//      """.stripMargin)

//    spark.sql(
//      """
//        | delete from hadoop_prod.default.delete_tbl where id=1
//      """.stripMargin)
//    spark.sql("select * from hadoop_prod.default.delete_tbl").show()

      /**
        * INSERT OVERWRITE examples.
        */
      // For partitioned tables, insert overwrite supports both dynamic
      // and static partition overwrite.
      // Dynamic partition overwrite:
//    spark.sql(
//      """
//        | insert overwrite hadoop_prod.default.test1 select id,name,loc from hadoop_prod.default.test3
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.test1
//      """.stripMargin).show()

      // Static partition overwrite:
//    spark.sql(
//      """
//        | insert overwrite hadoop_prod.default.test1
//        | partition (loc = 'guangzhou')
//        | select id,name from hadoop_prod.default.test3
//      """.stripMargin)
//
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.test1
//      """.stripMargin).show()

      /**
        * INSERT OVERWRITE on an unpartitioned table.
        */
//    spark.sql(
//      """
//        | insert overwrite hadoop_prod.default.test2 select id,name,loc from hadoop_prod.default.test3
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.test2
//      """.stripMargin).show()

      // Setup for the overwrite examples above: (re)create and seed test1/test2/test3.
//    spark.sql(
//      """
//        |drop table hadoop_prod.default.test1
//      """.stripMargin)
//    spark.sql(
//      """
//        |create table hadoop_prod.default.test1 (id int,name string,loc string)
//        |using iceberg
//        |partitioned by (loc)
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.test1 values (1,"zs","beijing"),(2,"ls","shanghai")
//      """.stripMargin)
//
//    spark.sql(
//      """
//        |create table hadoop_prod.default.test2 (id int,name string,loc string) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.test2 values (10,"x1","shandong"),(11,"x2","hunan")
//      """.stripMargin)
//
//    spark.sql(
//      """
//        |create table hadoop_prod.default.test3 (id int,name string,loc string) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.test3 values (3,"ww","beijing"),(4,"ml","shanghai"),(5,"tq","guangzhou")
//      """.stripMargin)

      /**
        * INSERT INTO + MERGE INTO examples.
        */
//    spark.sql(
//      """
//        |create table hadoop_prod.default.a (id int,name string,age int) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |create table hadoop_prod.default.b (id int,name string,age int,tp string) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.a values (1,"zs",18),(2,"ls",19),(3,"ww",20)
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.b values (1,"zs",30,"delete"),(2,"李四",31,"update"),(4,"王五",32,"add")
//      """.stripMargin)

      /** merge into */
      // Expected final contents of hadoop_prod.default.a: (2,"李四",31),(3,"ww",20),(4,"王五",32)
//    spark.sql(
//      """
//        | merge into hadoop_prod.default.a t1
//        | using (select id,name,age,tp from hadoop_prod.default.b ) t2
//        | on t1.id = t2.id
//        | when matched and t2.tp = 'delete' then delete
//        | when matched and t2.tp = 'update' then update set t1.name = t2.name ,t1.age = t2.age
//        | when not matched then insert (id,name,age) values (t2.id,t2.name,t2.age)
//      """.stripMargin)

//    spark.sql(
//      """
//        |select * from hadoop_prod.default.a
//      """.stripMargin).show()
    } finally {
      // Release executors, the UI, and the underlying SparkContext.
      spark.stop()
    }
  }

}
