package industry_2024.industry_10.clean

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, date_format, lit}

import java.text.SimpleDateFormat
import java.util.{Calendar, Date}

object clean_count {

  // Warehouse roots for the source (ODS) and target (DWD) Hudi databases.
  private val OdsBase = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_ods10.db"
  private val DwdBase = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd10.db"
  // Pattern used both for audit timestamps and for normalizing date columns.
  private val TimestampPattern = "yyyy-MM-dd HH:mm:ss"

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      // LEGACY rebase mode so pre-Gregorian dates round-trip through Avro unchanged.
      .config("spark.sql.legacy.avro.datetimeRebaseModeInWrite", "LEGACY")
      .config("spark.sql.legacy.avro.datetimeRebaseModeInRead", "LEGACY")
      .enableHiveSupport()
      .getOrCreate()

    // Yesterday's date (yyyyMMdd) — used as the value of the etldate partition column.
    val day = Calendar.getInstance()
    day.add(Calendar.DATE, -1)
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)
    println(yesterday)

    /**
     * Clean one ODS table into the corresponding DWD table.
     *
     * Reads the Hudi table at `$OdsBase/$ods_name`, drops the stale `etldate`,
     * adds the four audit columns (dwd_insert_user/time, dwd_modify_user/time),
     * re-adds `etldate` as yesterday's date, and appends the result to the
     * Hudi table at `$DwdBase/$dwd_name` partitioned by `etldate`.
     *
     * @param ods_name        source table name in hudi_gy_ods10
     * @param dwd_name        target table name in hudi_gy_dwd10 (also the Hudi table name)
     * @param primarykey      Hudi record key field(s), comma-separated
     * @param precombinefield Hudi precombine (ordering) field
     * @param dateField       optional timestamp column to reformat to
     *                        "yyyy-MM-dd HH:mm:ss" before writing (replaces the
     *                        former to_dwd02/to_dwd03 variants)
     */
    def to_dwd(ods_name: String,
               dwd_name: String,
               primarykey: String,
               precombinefield: String,
               dateField: Option[String] = None): Unit = {
      val ods_path = s"$OdsBase/${ods_name}"
      val dwd_path = s"$DwdBase/${dwd_name}"

      // One timestamp per table load so insert/modify audit times are identical
      // (the original code formatted new Date() twice and could straddle a second).
      val now = lit(new SimpleDateFormat(TimestampPattern).format(new Date()))

      val base = spark.read.format("hudi").load(ods_path).drop("etldate")
      // Normalize the requested timestamp column, if any.
      val normalized = dateField.fold(base) { c =>
        base.withColumn(c, date_format(col(c), TimestampPattern))
      }

      val result = normalized
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", now)
        .withColumn("dwd_modify_user", lit("user1"))
        .withColumn("dwd_modify_time", now)
        .withColumn("etldate", lit(yesterday))

      // Debug preview of the cleaned frame.
      result.limit(5).show()

      result.write.mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primarykey)
        .option(PRECOMBINE_FIELD.key(), precombinefield)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", dwd_name)
        .save(dwd_path)
    }

    to_dwd("environmentdata", "fact_environment_data", "EnvoId", "InPutTime")
    to_dwd("changerecord", "fact_change_record", "ChangeID,ChangeMachineID", "ChangeEndTime")
    to_dwd("basemachine", "dim_machine", "BaseMachineID", "MachineAddDate", Some("MachineAddDate"))
    to_dwd("producerecord", "fact_produce_record", "ProduceRecordID,ProduceMachineID", "ProduceCodeEndTime")
    to_dwd("machinedata", "fact_machine_data", "MachineRecordID", "MachineRecordDate", Some("MachineRecordDate"))

    spark.close()
  }

}
