package industry_2024.industry_09.clean

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{current_timestamp, date_format, lit, to_timestamp}

import java.text.SimpleDateFormat
import java.util.Calendar

/**
 * Data-cleaning job: copies raw ODS-layer tables (schema `ods09`) into the
 * DWD layer (schema `dwd09`), adding audit columns (insert/modify user and
 * time) and partitioning each table by `etldate` (yesterday's date).
 */
object clean_count {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
//      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
//      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd09")

    // Capture the run's wall-clock time once so every table written in this
    // run shares the same dwd_insert_time value.
    val day: Calendar = Calendar.getInstance()
    val currentTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(day.getTime)
    day.add(Calendar.DATE, -1)
    // Partition value: yesterday's date formatted as yyyyMMdd.
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)

    /**
     * Copies one ODS table into DWD, appending audit columns and writing it
     * as a Hive table partitioned by `etldate`.
     *
     * @param odsName   source table name in schema `ods09`
     * @param dwdName   target table name in schema `dwd09`
     * @param dedupCols columns to deduplicate on before writing; empty
     *                  (the default) means no deduplication is performed
     */
    def toDwd(odsName: String, dwdName: String, dedupCols: Seq[String] = Seq.empty): Unit = {
      val source = spark.table(s"ods09.${odsName}")
      val deduped =
        if (dedupCols.isEmpty) source
        else source.dropDuplicates(dedupCols)
      deduped
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", to_timestamp(lit(currentTime), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("dwd_modify_user", lit("user1"))
        // Format-then-parse truncates current_timestamp() to whole seconds.
        .withColumn(
          "dwd_modify_time",
          to_timestamp(date_format(current_timestamp(), "yyyy-MM-dd HH:mm:ss"), "yyyy-MM-dd HH:mm:ss")
        )
        .withColumn("etldate", lit(yesterday))
        .write.mode("append")
        .format("hive")
        .partitionBy("etldate")
        .saveAsTable(s"dwd09.${dwdName}")
    }

    toDwd("environmentdata", "fact_environment_data")
    // Deduplicate on the composite key (changeid, changemachineid).
    toDwd("changerecord", "fact_change_record", Seq("changeid", "changemachineid"))
    // Dimension table: deduplicate on the machine's business key.
    toDwd("basemachine", "dim_machine", Seq("basemachineid"))
    toDwd("producerecord", "fact_produce_record")
    toDwd("machinedata", "fact_machine_data")

    // Sanity check: preview a few rows from one of the freshly written tables.
    spark.sql("select * from dwd09.fact_machine_data limit 5").show

    spark.close()
  }
}
