package industry_2024.industry_08.clean

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{current_timestamp, date_format, lit, to_timestamp}

import java.text.SimpleDateFormat
import java.util.Calendar

/**
 * Data-cleaning job: copies raw ODS tables (ods08) into the DWD layer (dwd08),
 * optionally de-duplicating, and stamping each row with audit columns plus an
 * `etldate` partition value of yesterday's date.
 */
object clean_count {
  def main(args: Array[String]): Unit = {
    // Hive-enabled local session; nonstrict dynamic partitioning is required
    // because the target tables are partitioned by etldate.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd08")

    // Audit timestamp (job start, second precision) and yesterday's date,
    // which becomes the etldate partition value for every row written.
    val day = Calendar.getInstance()
    val currentTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(day.getTime)
    day.add(Calendar.DATE, -1)
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)

    /**
     * Copy one ODS table into DWD.
     *
     * Drops the source `etldate`, optionally de-duplicates, adds the four
     * audit columns, re-adds `etldate` = yesterday, and appends to the
     * partitioned Hive target table.
     *
     * @param odsName   source table name in ods08
     * @param dwdName   target table name in dwd08
     * @param dedupKeys columns to de-duplicate on; empty (default) = no de-dup
     */
    def to_dwd(odsName: String, dwdName: String, dedupKeys: Seq[String] = Seq.empty): Unit = {
      val source  = spark.table(s"ods08.$odsName").drop("etldate")
      val deduped = if (dedupKeys.isEmpty) source else source.dropDuplicates(dedupKeys)
      deduped
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", to_timestamp(lit(currentTime), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("dwd_modify_user", lit("user1"))
        // current_timestamp() is round-tripped through date_format/to_timestamp
        // to truncate the modify time to whole seconds.
        .withColumn(
          "dwd_modify_time",
          to_timestamp(date_format(current_timestamp(), "yyyy-MM-dd HH:mm:ss"), "yyyy-MM-dd HH:mm:ss")
        )
        .withColumn("etldate", lit(yesterday))
        .write.mode("append")
        .format("hive")
        .partitionBy("etldate")
        .saveAsTable(s"dwd08.$dwdName")
    }

    // Same table order as before consolidation; de-dup keys per table:
    // changerecord on (changeid, changemachineid), basemachine on basemachineid.
    to_dwd("environmentdata", "fact_environment_data")
    to_dwd("changerecord", "fact_change_record", Seq("changeid", "changemachineid"))
    to_dwd("basemachine", "dim_machine", Seq("basemachineid"))
    to_dwd("producerecord", "fact_produce_record")
    to_dwd("machinedata", "fact_machine_data")

    // Sanity check: preview a few rows of the last table written.
    spark.sql("select * from fact_machine_data limit 5").show

    spark.close()
  }
}
