package com.hrt.iceberg

import org.apache.spark.sql.SparkSession

/**
  * Demonstrates Spark SQL DDL operations on Iceberg tables: adding and
  * dropping partition fields (partition-spec evolution).
  *
  * Steps 1-8 are left commented out so they can be re-enabled one at a
  * time while walking through the tutorial; steps 9-10 are active.
  *
  * NOTE: the object name keeps its original spelling ("Icebrg") so any
  * external references to this entry point continue to work.
  */
object SparkOperatorIcebrgDDL3 {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().master("local").appName("SparkOperateIceberg")
      // Register a Hadoop catalog named "hadoop_prod" backed by an HDFS warehouse path.
      .config("spark.sql.catalog.hadoop_prod", "org.apache.iceberg.spark.SparkCatalog")
      .config("spark.sql.catalog.hadoop_prod.type", "hadoop")
      .config("spark.sql.catalog.hadoop_prod.warehouse", "hdfs://mycluster/sparkoperateiceberg")
      // Iceberg SQL extensions are required for ALTER TABLE ... ADD/DROP PARTITION FIELD.
      .config("spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
      .getOrCreate()

    try {
      //1. Create a plain (unpartitioned) Iceberg table
//    spark.sql(
//      """
//        | create table hadoop_prod.default.mytbl(id int,name string,loc string,ts timestamp) using iceberg
//      """.stripMargin)

      //2. Insert rows into the table, then query it
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(1,'zs',"beijing",cast(1608469830 as timestamp)),
//        |(3,'ww',"shanghai",cast(1603096230 as timestamp))
//      """.stripMargin)
//    spark.sql("select * from hadoop_prod.default.mytbl").show()

      //3. Promote the loc column to a partition field. This requires the
      //   IcebergSparkSessionExtensions config set on the session above.
//    spark.sql(
//      """
//        |alter table hadoop_prod.default.mytbl add partition field loc
//      """.stripMargin)

      //4. Insert more rows into mytbl. Rows written before step 3 remain
      //   unpartitioned; rows written after it land in loc partitions.
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(5,'tq',"hangzhou",cast(1608279630 as timestamp)),
//        |(2,'ls',"shandong",cast(1634559630 as timestamp))
//      """.stripMargin )
//    spark.sql("select * from hadoop_prod.default.mytbl").show()

      //5. Add ts as a partition field via the years() partition transform
//    spark.sql(
//      """
//        |alter table hadoop_prod.default.mytbl add partition field years(ts)
//      """.stripMargin)

      //6. Insert more rows. Earlier rows keep their old partition layout;
      //   new rows are partitioned by the evolved spec.
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(4,'ml',"beijing",cast(1639920630 as timestamp)),
//        |(6,'gb',"tianjin",cast(1576843830 as timestamp))
//      """.stripMargin )
//    spark.sql("select * from hadoop_prod.default.mytbl").show()

      //7. Drop the loc partition field from mytbl
//    spark.sql(
//      """
//        |alter table hadoop_prod.default.mytbl drop partition field loc
//      """.stripMargin)
      //8. Insert more rows into mytbl, then query
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(4,'ml',"beijing",cast(1639920630 as timestamp)),
//        |(6,'gb',"tianjin",cast(1576843830 as timestamp))
//      """.stripMargin )
//    spark.sql("select * from hadoop_prod.default.mytbl").show()

      //9. Drop the years(ts) partition field from mytbl
      spark.sql(
        """
          |alter table hadoop_prod.default.mytbl drop partition field years(ts)
        """.stripMargin)
      //10. Insert more rows into mytbl, then query the result
      spark.sql(
        """
          |insert into hadoop_prod.default.mytbl values
          |(5,'tq',"hangzhou",cast(1608279630 as timestamp)),
          |(2,'ls',"shandong",cast(1634559630 as timestamp))
        """.stripMargin)
      spark.sql("select * from hadoop_prod.default.mytbl").show()
    } finally {
      // Fix: the original never released the session, leaking the local
      // SparkContext. Always stop it, even if a SQL statement throws.
      spark.stop()
    }
  }

}
