package com.hrt.iceberg.unauto

import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.catalog.TableIdentifier
import org.apache.iceberg.hadoop.HadoopCatalog
import org.apache.spark.sql.SparkSession

/**
  *  Spark sql与Iceberg DDL 操作
  */
/**
  * Demo of Spark SQL + Iceberg DDL operations against a Hadoop catalog.
  *
  * Most statements are intentionally commented out: each section is meant to be
  * un-commented and run individually as a tutorial step (ALTER partition fields,
  * ALTER columns, REPLACE TABLE AS SELECT, CTAS, hidden partitioning).
  */
object SparkIceberg2 {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().master("local").appName("SparkOperateIceberg")
      // Register an Iceberg Hadoop catalog named "hadoop_prod"
      .config("spark.sql.catalog.hadoop_prod", "org.apache.iceberg.spark.SparkCatalog")
      .config("spark.sql.catalog.hadoop_prod.type", "hadoop")
      .config("spark.sql.catalog.hadoop_prod.warehouse", "hdfs://mycluster/sparkoperateiceberg")
      // Iceberg SQL extensions are required for the ALTER TABLE ... ADD/DROP PARTITION FIELD syntax below
      .config("spark.sql.extensions","org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
      .getOrCreate()

    /**
      * ALTER: operate on partition fields
      */
//    spark.sql("drop table hadoop_prod.default.mytbl")
//    spark.sql(
//      """
//        |create table hadoop_prod.default.mytbl(id int,name string,loc string,ts timestamp) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(1,'zs',"beijing",cast(1608469830 as timestamp)),
//        |(3,'ww',"shanghai",cast(1603096230 as timestamp))
//      """.stripMargin)


    /** Add column `loc` as a partition field of hadoop_prod.default.mytbl;
      * rows written before the change (with no partition) are unaffected. */
//    spark.sql(
//      """
//        |alter table hadoop_prod.default.mytbl add partition field loc
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(5,'tq',"hangzhou",cast(1608279630 as timestamp)),
//        |(2,'ls',"shandong",cast(1634559630 as timestamp))
//      """.stripMargin)

    /** Add column `ts` (year transform) as a partition field of hadoop_prod.default.mytbl */
//    spark.sql(
//      """
//        |alter table hadoop_prod.default.mytbl add partition field years(ts)
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(4,'ml',"beijing",cast(1639920630 as timestamp)),
//        |(6,'gb',"tianjin",cast(1576843830 as timestamp))
//      """.stripMargin)

    /** Drop partition fields */
//    spark.sql(
//    """
//      |alter table hadoop_prod.default.mytbl drop partition field loc
//    """.stripMargin)
//
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(4,'ml',"beijing",cast(1639920630 as timestamp)),
//        |(6,'gb',"tianjin",cast(1576843830 as timestamp))
//      """.stripMargin)

//    spark.sql(
//      """
//        |alter table hadoop_prod.default.mytbl drop partition field years(ts)
//      """.stripMargin)

//    spark.sql(
//      """
//        |insert into hadoop_prod.default.mytbl values
//        |(4,'ml',"beijing",cast(1639920630 as timestamp)),
//        |(6,'gb',"tianjin",cast(1576843830 as timestamp))
//      """.stripMargin)

//    spark.sql(
//      """
//        |select * from hadoop_prod.default.mytbl
//      """.stripMargin).show()

    /**
      * ALTER: add / drop / rename columns — only supported on Spark 3.x
      */

    // Add columns `name string, age int` to table hadoop_prod.default.r2
//    spark.sql(
//      """
//        | alter table hadoop_prod.default.r2 add column name string,age int
//      """.stripMargin)

    // Drop column `id`
//    spark.sql(
//      """
//        | alter table hadoop_prod.default.r2 drop column id
//      """.stripMargin)

    // Rename column: age -> agex
//    spark.sql(
//      """
//        |alter table hadoop_prod.default.r2 rename column age to agex
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.r2
//      """.stripMargin).show()

    /**
      * REPLACE TABLE ... AS SELECT ...
      */
//    spark.sql(
//      """
//        |create table hadoop_prod.default.r1 (id int,name string,age int) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.r1 values (1,'zs',18),(2,'ls',19)
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.r1
//      """.stripMargin).show()


//    spark.sql(
//      """
//        |create table hadoop_prod.default.r2 (id int,loc string) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into hadoop_prod.default.r2 values (1,'beijing'),(2,'shanghai')
//      """.stripMargin)

//    spark.sql(
//      """
//        |replace table hadoop_prod.default.r1 as select id,loc from hadoop_prod.default.r2
//      """.stripMargin)
//    spark.sql(
//      """
//        |select * from hadoop_prod.default.r1
//      """.stripMargin).show()

//    spark.sql(
//      """
//        |drop table hadoop_prod.default.r1
//      """.stripMargin)


    /**
      * CREATE TABLE ... USING iceberg AS SELECT ...
      */
//    spark.sql(
//      """
//        | create table hadoop_prod.default.mytbl1 (id int,name string,age int,loc string) using iceberg
//      """.stripMargin)
//    spark.sql(
//      """
//        |insert into  hadoop_prod.default.mytbl1 values (1,'zs',18,'bj'),(3,'ww',20,'bj'),(2,'ls',19,'sh'),(4,'ml',21,'sh')
//      """.stripMargin)

//    spark.sql(
//      """
//        | create table hadoop_prod.default.mytbl2 using iceberg select id,name,age ,loc from hadoop_prod.default.mytbl1
//      """.stripMargin)
//
//    spark.sql(
//      """
//        | select * from hadoop_prod.default.mytbl2
//      """.stripMargin).show()

    /**
      * Creating partitioned tables and hidden partitions
      */

    /** Create an ordinary (unpartitioned) table */
//    spark.sql(
//      """
//        | create table hadoop_prod.default.tbl (id int ,name string,age int) using iceberg
//      """.stripMargin)

    /** Create a partitioned table */
//    spark.sql(
//      """
//        |create table if not exists hadoop_prod.default.partition_tbl (id int,name string,age int,loc string) using iceberg partitioned by (loc)
//      """.stripMargin)

    /**
     * Insert data. NOTE: before inserting into an Iceberg partitioned table, the data must be
     * sorted by the partition column — rows of the same partition must be contiguous, otherwise
     * writing a partition, then revisiting it later, fails with a "partition already closed" error.
     */
//    spark.sql(
//      """
//        | insert into hadoop_prod.default.partition_tbl values (1,'zs',18,'bj'),(3,'ww',20,'bj'),(2,'ls',19,'sh'),(4,'ml',21,'sh')
//      """.stripMargin)

    /** Query the data */
//    spark.sql("select * from hadoop_prod.default.partition_tbl").show()

    /** Iceberg hidden partitions: partitions derived by transforming an existing column */
//    spark.sql(
//      """
//        | create table if not exists hadoop_prod.default.partition_tbl1 (id int,name string,age int,regist_ts timestamp) using iceberg
//        | partitioned by (years(regist_ts))
//        |
//      """.stripMargin)
//    spark.sql(
//      """
//        | create table if not exists hadoop_prod.default.partition_tbl2 (id int,name string,age int,regist_ts timestamp) using iceberg
//        | partitioned by (months(regist_ts))
//        |
//      """.stripMargin)
//    spark.sql(
//      """
//        | create table if not exists hadoop_prod.default.partition_tbl3 (id int,name string,age int,regist_ts timestamp) using iceberg
//        | partitioned by (days(regist_ts))
//        |
//      """.stripMargin)
//    spark.sql(
//      """
//        | create table if not exists hadoop_prod.default.partition_tbl4 (id int,name string,age int,regist_ts timestamp) using iceberg
//        | partitioned by (hours(regist_ts))
//        |
//      """.stripMargin)

    /** Insert data. NOTE: rows must be pre-sorted — sorting is mandatory; it suffices that
      * rows with the same date are written together. */
    // (1,'zs',18,1608469830) --"2020-12-20 21:10:30"
    // (2,'ls',19,1634559630) --"2021-10-18 20:20:30"
    // (3,'ww',20,1603096230) --"2020-10-19 16:30:30"
    // (4,'ml',21,1639920630) --"2021-12-19 21:30:30"
    // (5,'tq',22,1608279630) --"2020-12-18 16:20:30"
    // (6,'gb',23,1576843830) --"2019-12-20 20:10:30"


//    spark.sql(
//      """
//        | insert into hadoop_prod.default.partition_tbl4 values
//        | (1,'zs',18,cast(1608469830 as timestamp)),
//        | (5,'tq',22,cast(1608279630 as timestamp)),
//        | (3,'ww',20,cast(1603096230 as timestamp)),
//        | (2,'ls',19,cast(1634559630 as timestamp)),
//        | (4,'ml',21,cast(1639920630 as timestamp)),
//        | (6,'gb',23,cast(1576843830 as timestamp))
//        | """.stripMargin)
    // Query the result
//    spark.sql( """select * from hadoop_prod.default.partition_tbl4 """.stripMargin).show()

    // Release the session (and the underlying SparkContext) — previously leaked.
    spark.stop()
  }

}
