package industry_2024.industry_05.clean

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{current_timestamp, date_format, lit, to_timestamp}

import java.text.SimpleDateFormat
import java.util.Calendar

object clean_count {

  /**
   * ETL cleaning job: copies four ODS tables into their DWD counterparts.
   *
   * For each table it drops the source `etldate` column, optionally
   * de-duplicates, adds the four standard audit columns, re-adds `etldate`
   * set to yesterday's date, and appends into a Hive table partitioned by
   * `etldate`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .enableHiveSupport()
      .getOrCreate()

    // Yesterday's date in yyyyMMdd form — the static partition value for etldate.
    val day: Calendar = Calendar.getInstance()
    day.add(Calendar.DATE, -1)
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)

    // Current timestamp truncated to whole seconds: the date_format /
    // to_timestamp round trip deliberately drops the sub-second part.
    // (The original code wrapped this in lit(...), which is a no-op on a Column.)
    def nowToSecond =
      to_timestamp(date_format(current_timestamp(), "yyyy-MM-dd HH:mm:ss"), "yyyy-MM-dd HH:mm:ss")

    /**
     * Generic clean step, unifying the former clean01/clean02/clean03:
     * reads `ods05.<odsName>`, de-duplicates on `dedupCols` when non-empty,
     * stamps audit columns, and appends into `dwdName` partitioned by etldate.
     *
     * @param odsName   source table name inside the ods05 database
     * @param dwdName   target DWD table name
     * @param dedupCols columns to de-duplicate on; empty Seq (default) = no de-dup
     */
    def clean(odsName: String, dwdName: String, dedupCols: Seq[String] = Seq.empty): Unit = {
      val src = spark.table(s"ods05.${odsName}").drop("etldate")
      // dropDuplicates(Seq(col)) is equivalent to the single-column overload,
      // so one code path covers the 0-, 1- and 2-column cases.
      val deduped = if (dedupCols.isEmpty) src else src.dropDuplicates(dedupCols)
      deduped
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", nowToSecond)
        .withColumn("dwd_modify_user", lit("user1"))
        .withColumn("dwd_modify_time", nowToSecond)
        .withColumn("etldate", lit(yesterday))
        .write.format("hive").mode("append")
        .partitionBy("etldate")
        .saveAsTable(dwdName)
    }

    // Load the four DWD tables (same tables/columns as before).
    clean("changerecord", "fact_change_record", Seq("changeid", "changemachineid"))
    clean("basemachine", "dim_machine", Seq("basemachineid"))
    clean("producerecord", "fact_produce_record")
    clean("machinedata", "fact_machine_data")

    spark.close()
  }

}
