package ods_industry_2024.gy_10.clean

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{current_timestamp, date_format, lit, to_timestamp}

import java.text.SimpleDateFormat
import java.util.Calendar

object clean_count {

  /** HDFS warehouse root; both the ODS and DWD Hudi databases live under it. */
  private val WarehouseBase = "hdfs://192.168.40.110:9000/user/hive/warehouse"

  def main(args: Array[String]): Unit = {
    // NOTE(review): master is hard-coded to local[*]; this overrides any
    // --master passed to spark-submit. Confirm this job is only run locally.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .config("spark.sql.legacy.parquet.datetimeRebaseModeInRead", "LEGACY")
      .config("spark.sql.legacy.avro.datetimeRebaseModeInWrite", "LEGACY")
      .enableHiveSupport()
      .getOrCreate()

    // One driver-side timestamp for the whole job, so every row written in
    // this run carries the same audit time.
    val calendar = Calendar.getInstance()
    val currentTime: String = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(calendar.getTime)
    calendar.add(Calendar.DATE, -1)
    // Yesterday's date (yyyyMMdd) is used as the etldate partition value.
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(calendar.getTime)

    /**
     * Copies one ODS Hudi table into its DWD counterpart, adding audit columns
     * (dwd_insert_user/time, dwd_modify_user/time) and repartitioning by etldate.
     *
     * @param odsName         source table directory under hudi_gy_ods10.db
     * @param dwdName         target table name/directory under hudi_gy_dwd10.db
     * @param primaryKey      Hudi record key field(s), comma-separated
     * @param precombineField Hudi precombine (dedup ordering) field
     */
    def to_dwd(odsName: String, dwdName: String, primaryKey: String, precombineField: String): Unit = {
      val odsPath = s"$WarehouseBase/hudi_gy_ods10.db/$odsName"
      val dwdPath = s"$WarehouseBase/hudi_gy_dwd10.db/$dwdName"

      // Newly inserted rows get identical insert/modify audit timestamps.
      // (Previously modify_time used current_timestamp() round-tripped through
      // date_format/to_timestamp, which disagreed with insert_time by a few
      // seconds within the same load.)
      val auditTime = to_timestamp(lit(currentTime), "yyyy-MM-dd HH:mm:ss")

      spark.read.format("hudi").load(odsPath)
        .drop("etldate") // replaced below with this run's partition value
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", auditTime)
        .withColumn("dwd_modify_user", lit("user1"))
        .withColumn("dwd_modify_time", auditTime)
        .withColumn("etldate", lit(yesterday))
        .write.mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primaryKey)
        .option(PRECOMBINE_FIELD.key(), precombineField)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", dwdName)
        .save(dwdPath)
      println(s"${dwdName}完成")
    }

    to_dwd("environmentdata", "fact_environment_data", "EnvoId", "InPutTime")
    to_dwd("changerecord", "fact_change_record", "ChangeID,ChangeMachineID", "ChangeEndTime")
    to_dwd("basemachine", "dim_machine", "BaseMachineID", "MachineAddDate")
    to_dwd("producerecord", "fact_produce_record", "ProduceRecordID,ProduceMachineID", "ProduceCodeEndTime")
    to_dwd("machinedata", "fact_machine_data", "MachineRecordID", "MachineRecordDate")

    spark.close()
  }

}
