package DianShang_2024.ds_02.clean

import breeze.linalg.Vector.castFunc
import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, lit, max}

import java.text.SimpleDateFormat
import java.util.Date
import scala.math.Ordering.Implicits.infixOrderingOps

object table_preparation {

  /**
   * Prepares the dwd layer of the `ds_hudi02` warehouse:
   *   1. (Re)creates the Hudi dimension/fact tables in `dwd_ds_hudi02` via Spark SQL.
   *   2. Copies one partition of each ods dimension table into the matching dwd table,
   *      stamping the four `dwd_*` audit columns.
   *
   * NOTE(review): the original author observed that creating the tables through the
   * DataSource write path did not register them in Hive, hence the explicit Spark SQL DDL.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("准备表")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd_ds_hudi02")

    /**
     * Drops and recreates one Hudi COW table in the current database.
     *
     * Every dwd table shares the same shape: business columns + the four `dwd_*`
     * audit columns, partitioned by `etl_date`, primary key `id`. Centralising the
     * template fixes two copy-paste typos in the original DDL:
     *   - `hoodie.datasource.hive_aync.mode` -> `hive_sync` (the misspelled key was
     *     silently ignored, so Hive sync mode was never applied);
     *   - `preCombineFiled` -> `preCombineField` on fact_order_detail.
     *
     * @param table           dwd table name
     * @param businessColumns column definitions, e.g. "id int"
     * @param preCombineField column Hudi uses to deduplicate on upsert
     */
    def createHudiTable(table: String, businessColumns: Seq[String], preCombineField: String): Unit = {
      val auditColumns = Seq(
        "dwd_insert_user String",
        "dwd_insert_time String",
        "dwd_modify_user String",
        "dwd_modify_time String")
      spark.sql(s"drop table if exists $table")
      spark.sql(
        s"""create table if not exists $table(
           |${(businessColumns ++ auditColumns).mkString(",\n")}
           |) using hudi
           |tblproperties(
           |type="cow",
           |primaryKey="id",
           |preCombineField="$preCombineField",
           |hoodie.datasource.hive_sync.mode="hms"
           |)
           |partitioned by(etl_date String)
           |""".stripMargin)
    }

    createHudiTable("dim_user_info", Seq(
      "id int",
      "login_name String",
      "nick_name String",
      "passwd String",
      "name String",
      "phone_num String",
      "email String",
      "head_img String",
      "user_level String",
      "birthday String",
      "gender String",
      "create_time String",
      "operate_time String"), preCombineField = "operate_time")

    createHudiTable("dim_sku_info", Seq(
      "id int",
      "spu_id int",
      "price decimal(10,0)",
      "sku_name String",
      "sku_desc String",
      "weight decimal(10,2)",
      "tm_id int",
      "category3_id int",
      "sku_default_img String",
      "create_time String"), preCombineField = "dwd_modify_time")

    createHudiTable("dim_province", Seq(
      "id int",
      "name String",
      "region_id String",
      "area_code String",
      "iso_code String",
      "create_time String"), preCombineField = "dwd_modify_time")

    createHudiTable("dim_region", Seq(
      "id String",
      "region_name String",
      "create_time String"), preCombineField = "dwd_modify_time")

    // NOTE(review): the original DDL declared the last audit column of this table as
    // "dwd_modfiy_time"; normalised here to dwd_modify_time to match every other table.
    createHudiTable("fact_order_info", Seq(
      "id int",
      "consignee String",
      "consignee_tel String",
      "final_total_amount decimal(16,2)",
      "order_status String",
      "user_id int",
      "delivery_address String",
      "order_comment String",
      "out_trade_no String",
      "trade_body String",
      "create_time String",
      "operate_time String",
      "expire_time String",
      "tracking_no String",
      "parent_order_id int",
      "img_url String",
      "province_id int",
      "benefit_reduce_amount decimal(16,2)",
      "original_total_amount decimal(16,2)",
      "feight_fee decimal(16,2)"), preCombineField = "operate_time")

    createHudiTable("fact_order_detail", Seq(
      "id int",
      "order_id int",
      "sku_id int",
      "sku_name String",
      "img_url String",
      "order_price decimal(10,2)",
      "sku_num String",
      "create_time String",
      "source_type String",
      "source_id int"), preCombineField = "dwd_modify_time")

    spark.sql("show tables").show

    /**
     * Copies one partition of an ods Hudi table into the matching dwd Hudi table.
     *
     * Adds the four `dwd_*` audit columns and rewrites `etl_date` to the newest ods
     * partition value. Replaces the two former copy-paste variants (ods_to_dwd01/02)
     * that differed only in the partition filter.
     *
     * @param odsName       source table under ods_ds_hudi02.db
     * @param dwdName       target table under dwd_ds_hudi02.db
     * @param primaryKey    Hudi record-key column
     * @param combineKey    Hudi precombine column
     * @param readPartition when Some(p), read that fixed etl_date partition (some ods
     *                      tables were mistakenly loaded under 20240101); when None,
     *                      read the newest partition.
     */
    def odsToDwd(odsName: String,
                 dwdName: String,
                 primaryKey: String,
                 combineKey: String,
                 readPartition: Option[String] = None): Unit = {
      val odsPath = s"hdfs://192.168.40.110:9000/user/hive/warehouse/ods_ds_hudi02.db/${odsName}"
      val dwdPath = s"hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi02.db/${dwdName}"

      // Newest ods partition; always used as the target etl_date value.
      val maxPartition = spark.read.format("hudi").load(odsPath)
        .agg(max(col("etl_date")))
        .first()
        .get(0)
        .toString
      println("ods最新分区的值:", maxPartition)

      // Compute the audit timestamp once so insert/modify times are guaranteed equal
      // (the original called new Date() twice and could differ by milliseconds).
      val auditTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date())

      spark.read.format("hudi").load(odsPath)
        .where(col("etl_date") === readPartition.getOrElse(maxPartition))
        .drop("etl_date")
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", lit(auditTime).cast("String"))
        .withColumn("dwd_modify_user", lit("user1"))
        .withColumn("dwd_modify_time", lit(auditTime).cast("String"))
        .withColumn("etl_date", lit(maxPartition))
        .write.mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primaryKey)
        .option(PRECOMBINE_FIELD.key(), combineKey)
        .option(PARTITIONPATH_FIELD.key(), "etl_date")
        .option("hoodie.table.name", dwdName)
        .save(dwdPath)
      println("写入成功")
    }

    // user_info / base_province / base_region were loaded under the fixed partition
    // 20240101, so read that; sku_info is read from its newest partition.
    odsToDwd("user_info", "dim_user_info", "id", "operate_time", readPartition = Some("20240101"))
    odsToDwd("sku_info", "dim_sku_info", "id", "dwd_modify_time")
    odsToDwd("base_province", "dim_province", "id", "dwd_modify_time", readPartition = Some("20240101"))
    odsToDwd("base_region", "dim_region", "id", "dwd_modify_time", readPartition = Some("20240101"))

    spark.close()
  }

}
