package DianShang_2024.ds_06.extract

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, date_format, lit}
import shapeless.syntax.typeable.typeableOps

import java.text.SimpleDateFormat
import java.util.{Date, Properties}

object tableData_preparation {

  /**
   * Extraction job: reads source tables from MySQL (`shtd_store`) and loads them
   * into Hudi ODS tables under `ods_ds_hudi`, partitioned by a fixed `etl_date`.
   *
   * Date/timestamp columns are re-formatted to strings before the Hudi write,
   * because writing raw MySQL date types into Hudi mangles them (they arrive as
   * timestamps that do not format back correctly). Every time-typed column must
   * go through this formatting step.
   */
  def main(args: Array[String]): Unit = {
    // SparkSQL session with Hudi extension and Hive metastore support.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据抽取准备表和数据库")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Shared connection / layout constants (previously duplicated inline per table).
    val jdbcUrl       = "jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false"
    val warehouseBase = "hdfs://192.168.40.110:9000/user/hive/warehouse/ods_ds_hudi.db"
    val etlDate       = "20240101"
    val tsPattern     = "yyyy-MM-dd HH:mm:ss"

    // MySQL connection properties.
    val mysqlJdbc = new Properties()
    mysqlJdbc.setProperty("user", "root")
    mysqlJdbc.setProperty("password", "123456")
    // NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5.x class name;
    // if the cluster ships Connector/J 8.x, use com.mysql.cj.jdbc.Driver — confirm.
    mysqlJdbc.setProperty("driver", "com.mysql.jdbc.Driver")

    try {
      spark.sql("use ods_ds_hudi")

      /**
       * Reads one MySQL table and appends it to the matching Hudi table.
       *
       * Generalized from the original single-purpose helper so that every table,
       * including the previously inlined special cases, goes through one code path.
       *
       * @param tableName       source table name; also the Hudi table name and HDFS dir
       * @param recordKey       Hudi record-key column
       * @param precombineKey   Hudi precombine column
       * @param dateCols        (column -> pattern) pairs to re-format via date_format;
       *                        defaults to formatting `create_time` as a timestamp string
       * @param dropNullCol     if set, rows where this column is NULL are dropped
       *                        (e.g. user_info.operate_time NULLs break the transfer)
       * @param stampCreateTime if true, synthesize `create_time` as the current wall-clock
       *                        time (for source tables that lack the column entirely)
       */
      def writeHudi(tableName: String,
                    recordKey: String,
                    precombineKey: String,
                    dateCols: Seq[(String, String)] = Seq("create_time" -> "yyyy-MM-dd HH:mm:ss"),
                    dropNullCol: Option[String] = None,
                    stampCreateTime: Boolean = false): Unit = {
      val path = s"$warehouseBase/$tableName"

      val source = spark.read.jdbc(jdbcUrl, tableName, mysqlJdbc)

      // Transform order matters for the written schema's column order:
      // a synthesized create_time must be appended BEFORE etl_date, while
      // in-place re-formatting of existing columns does not move them.
      val stamped =
        if (stampCreateTime)
          source.withColumn("create_time", lit(new SimpleDateFormat(tsPattern).format(new Date())))
        else
          source

      val partitioned = stamped.withColumn("etl_date", lit(etlDate))

      val filtered = dropNullCol.fold(partitioned)(c => partitioned.filter(col(c).isNotNull))

      val formatted = dateCols.foldLeft(filtered) { case (df, (name, pattern)) =>
        df.withColumn(name, date_format(col(name), pattern))
      }

      formatted.write.mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(PRECOMBINE_FIELD.key(), precombineKey)
        .option(RECORDKEY_FIELD.key(), recordKey)
        .option(PARTITIONPATH_FIELD.key(), "etl_date")
        .option("hoodie.table.name", tableName)
        .save(path)
      }

      // user_info: operate_time contains NULLs which make the transfer fail,
      // so those rows are filtered out; birthday is date-only, the rest are timestamps.
      writeHudi("user_info", "id", "operate_time",
        dateCols = Seq(
          "birthday"     -> "yyyy-MM-dd",
          "create_time"  -> tsPattern,
          "operate_time" -> tsPattern),
        dropNullCol = Some("operate_time"))

      // sku_info
      writeHudi("sku_info", "id", "create_time")

      // base_province / base_region: the source tables have no create_time,
      // so one is synthesized from the current time (and not re-formatted —
      // it is already a formatted string).
      writeHudi("base_province", "id", "create_time",
        dateCols = Seq.empty, stampCreateTime = true)
      writeHudi("base_region", "id", "create_time",
        dateCols = Seq.empty, stampCreateTime = true)

      // order_info: three timestamp columns need re-formatting.
      writeHudi("order_info", "id", "operate_time",
        dateCols = Seq(
          "create_time"  -> tsPattern,
          "operate_time" -> tsPattern,
          "expire_time"  -> tsPattern))

      // order_detail
      writeHudi("order_detail", "id", "create_time")
    } finally {
      // Always release the SparkSQL session, even if a table load fails.
      spark.close()
    }
  }

}
