package com.hrt.iceberg

import org.apache.spark.sql.SparkSession

/**
  * Spark SQL + Iceberg DDL examples: hidden (transform) partitioning.
  *
  * Demonstrates Iceberg partition transforms — identity, years, months, days,
  * hours, and bucket — against a Hadoop catalog. Only the bucket-partition
  * example (partition_tbl5) is active; the other examples are kept commented
  * out for reference.
  *
  * NOTE(review): requires a reachable HDFS at hdfs://mycluster — runs as an
  * end-to-end demo, not a unit-testable function.
  */
object SparkOperatorIcebrgDDL1 {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().master("local").appName("SparkOperateIceberg")
      // Register a Hadoop catalog named "hadoop_prod"
      .config("spark.sql.catalog.hadoop_prod", "org.apache.iceberg.spark.SparkCatalog")
      .config("spark.sql.catalog.hadoop_prod.type", "hadoop")
      .config("spark.sql.catalog.hadoop_prod.warehouse", "hdfs://mycluster/sparkoperateiceberg")
      .getOrCreate()

    //Create a regular (unpartitioned) table
//    spark.sql(
//      """
//        | create table if not exists hadoop_prod.default.normal_tbl(id int,name string,age int) using iceberg
//      """.stripMargin)

    //Create a partitioned table using column loc as the (identity) partition field
//    spark.sql(
//      """
//        |create table if not exists hadoop_prod.default.partition_tbl(id int,name string,age int,loc string) using iceberg
//        |partitioned by (loc)
//      """.stripMargin)

    //When inserting into a partitioned table, rows must be grouped by partition value,
    //otherwise: java.lang.IllegalStateException: Already closed files for partition:xxx
//    spark.sql(
//      """
//        |insert into table hadoop_prod.default.partition_tbl values (1,"zs",18,"beijing"),(3,"ww",20,"beijing"),(2,"ls",19,"shanghai"),(4,"ml",21,"shagnhai")
//      """.stripMargin)
//    //Query the data
//    spark.sql("select * from hadoop_prod.default.partition_tbl").show()

    /**
      * Create partitioned table partition_tbl1, partitioned by years(regist_ts)
      */
//    spark.sql(
//      """
//        |create table if not exists hadoop_prod.default.partition_tbl1(id int ,name string,age int,regist_ts timestamp) using iceberg
//        |partitioned by (years(regist_ts))
//      """.stripMargin)

    //Insert data. Note: rows must be pre-sorted so that rows with the same date are written together.
    //(1,'zs',18,1608469830) --"2020-12-20 21:10:30"
    //(2,'ls',19,1634559630) --"2021-10-18 20:20:30"
    //(3,'ww',20,1603096230) --"2020-10-19 16:30:30"
    //(4,'ml',21,1639920630) --"2021-12-19 21:30:30"
    //(5,'tq',22,1608279630) --"2020-12-18 16:20:30"
    //(6,'gb',23,1576843830) --"2019-12-20 20:10:30"
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.partition_tbl1 values
//        |(1,'zs',18,cast(1608469830 as timestamp)),
//        |(3,'ww',20,cast(1603096230 as timestamp)),
//        |(5,'tq',22,cast(1608279630 as timestamp)),
//        |(2,'ls',19,cast(1634559630 as timestamp)),
//        |(4,'ml',21,cast(1639920630 as timestamp)),
//        |(6,'gb',23,cast(1576843830 as timestamp))
//      """.stripMargin)

    //Query the result
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.partition_tbl1
//      """.stripMargin).show()


    /**
      * Create partitioned table partition_tbl2, partitioned by months(regist_ts) — partitions by "year-month"
      */
//    spark.sql(
//      """
//        |create table if not exists hadoop_prod.default.partition_tbl2(id int ,name string,age int,regist_ts timestamp) using iceberg
//        |partitioned by (months(regist_ts))
//      """.stripMargin)
//
//    //Insert data. Note: rows must be pre-sorted so that rows with the same date are written together.
//    //(1,'zs',18,1608469830) --"2020-12-20 21:10:30"
//    //(2,'ls',19,1634559630) --"2021-10-18 20:20:30"
//    //(3,'ww',20,1603096230) --"2020-10-19 16:30:30"
//    //(4,'ml',21,1639920630) --"2021-12-19 21:30:30"
//    //(5,'tq',22,1608279630) --"2020-12-18 16:20:30"
//    //(6,'gb',23,1576843830) --"2019-12-20 20:10:30"
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.partition_tbl2 values
//        |(1,'zs',18,cast(1608469830 as timestamp)),
//        |(5,'tq',22,cast(1608279630 as timestamp)),
//        |(2,'ls',19,cast(1634559630 as timestamp)),
//        |(3,'ww',20,cast(1603096230 as timestamp)),
//        |(4,'ml',21,cast(1639920630 as timestamp)),
//        |(6,'gb',23,cast(1576843830 as timestamp))
//      """.stripMargin)
//
//    //Query the result
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.partition_tbl2
//      """.stripMargin).show()


    /**
      * Create partitioned table partition_tbl3, partitioned by days(regist_ts) — partitions by "year-month-day"
      */
//    spark.sql(
//      """
//        |create table if not exists hadoop_prod.default.partition_tbl3(id int ,name string,age int,regist_ts timestamp) using iceberg
//        |partitioned by (days(regist_ts))
//      """.stripMargin)

    //Insert data. Note: rows must be pre-sorted so that rows with the same date are written together.
    //(1,'zs',18,1608469830) --"2020-12-20 21:10:30"
    //(2,'ls',19,1634559630) --"2021-10-18 20:20:30"
    //(3,'ww',20,1603096230) --"2020-10-19 16:30:30"
    //(4,'ml',21,1639920630) --"2021-12-19 21:30:30"
    //(5,'tq',22,1608279630) --"2020-12-18 16:20:30"
    //(6,'gb',23,1576843830) --"2019-12-20 20:10:30"
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.partition_tbl3 values
//        |(1,'zs',18,cast(1608469830 as timestamp)),
//        |(5,'tq',22,cast(1608279630 as timestamp)),
//        |(2,'ls',19,cast(1634559630 as timestamp)),
//        |(3,'ww',20,cast(1603096230 as timestamp)),
//        |(4,'ml',21,cast(1639920630 as timestamp)),
//        |(6,'gb',23,cast(1576843830 as timestamp))
//      """.stripMargin)

    //Query the result
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.partition_tbl3
//      """.stripMargin).show()


    /**
      * Create partitioned table partition_tbl4, partitioned by hours(regist_ts) — partitions by "year-month-day-hour"
      */
//    spark.sql(
//      """
//        |create table if not exists hadoop_prod.default.partition_tbl4(id int ,name string,age int,regist_ts timestamp) using iceberg
//        |partitioned by (hours(regist_ts))
//      """.stripMargin)

    //Insert data. Note: rows must be pre-sorted so that rows with the same date are written together.
    //(1,'zs',18,1608469830) --"2020-12-20 21:10:30"
    //(2,'ls',19,1634559630) --"2021-10-18 20:20:30"
    //(3,'ww',20,1603096230) --"2020-10-19 16:30:30"
    //(4,'ml',21,1639920630) --"2021-12-19 21:30:30"
    //(5,'tq',22,1608279630) --"2020-12-18 16:20:30"
    //(6,'gb',23,1576843830) --"2019-12-20 20:10:30"
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.partition_tbl4 values
//        |(1,'zs',18,cast(1608469830 as timestamp)),
//        |(5,'tq',22,cast(1608279630 as timestamp)),
//        |(2,'ls',19,cast(1634559630 as timestamp)),
//        |(3,'ww',20,cast(1603096230 as timestamp)),
//        |(4,'ml',21,cast(1639920630 as timestamp)),
//        |(6,'gb',23,cast(1576843830 as timestamp))
//      """.stripMargin)

    //Query the result
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.partition_tbl4
//      """.stripMargin).show()



    // Active example: bucket partitioning. Ensure the session is stopped even
    // if any of the SQL statements fails (fixes a leaked local SparkContext).
    try {
      //Create partitioned table partition_tbl5, partitioned by bucket(2, id) — id hashed into 2 buckets
      spark.sql(
        """
          |create table if not exists hadoop_prod.default.partition_tbl5(id int ,name string,age int,regist_ts timestamp) using iceberg
          |partitioned by (bucket(2,id))
        """.stripMargin)

      //Insert data. Note: rows must be pre-sorted so that rows of the same partition are written together.
      //(1,'zs',18,1608469830) --"2020-12-20 21:10:30"
      //(2,'ls',19,1634559630) --"2021-10-18 20:20:30"
      //(3,'ww',20,1603096230) --"2020-10-19 16:30:30"
      //(4,'ml',21,1639920630) --"2021-12-19 21:30:30"
      //(5,'tq',22,1608279630) --"2020-12-18 16:20:30"
      //(6,'gb',23,1576843830) --"2019-12-20 20:10:30"
      spark.sql(
        """
          |insert into hadoop_prod.default.partition_tbl5 values
          |(1,'zs',18,cast(1608469830 as timestamp)),
          |(4,'ml',21,cast(1639920630 as timestamp)),
          |(5,'tq',22,cast(1608279630 as timestamp)),
          |(2,'ls',19,cast(1634559630 as timestamp)),
          |(3,'ww',20,cast(1603096230 as timestamp)),
          |(6,'gb',23,cast(1576843830 as timestamp))
        """.stripMargin)

      //Query the result
      spark.sql(
        """
          |select * from hadoop_prod.default.partition_tbl5
        """.stripMargin).show()
    } finally {
      // Release the SparkContext and associated resources.
      spark.stop()
    }
  }

}
