package ds_industry_2025.industry.gy_05.T2

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{lit, to_timestamp}

import java.text
import java.text.SimpleDateFormat
import java.util.Calendar

object t2_count {
  def main(args: Array[String]): Unit = {
    // Spark session with Hive support. Dynamic partition mode is required for
    // the partitioned `saveAsTable` writes below; Kryo speeds up serialization.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // FIX: key was misspelled "spark.sq.extensions", so the Hudi session
      // extension was silently never registered.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Wall-clock timestamp stamped into the dwd audit columns, and yesterday's
    // date (yyyyMMdd) used as the etldate partition value.
    val day = Calendar.getInstance()
    val currentTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(day.getTime)
    day.add(Calendar.DATE, -1)
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)

    /** Copies table `ods.odsName` into `dwd.dwdName`:
      *  - optionally de-duplicates on `dedupCols` (no-op when empty),
      *  - replaces the incoming etldate with yesterday's partition value,
      *  - adds the four dwd audit columns,
      *  - appends into the Hive table partitioned by etldate.
      */
    def odsToDwd(odsName: String, dwdName: String, dedupCols: Seq[String] = Seq.empty): Unit = {
      val source  = spark.table(s"ods.${odsName}")
      val deduped = if (dedupCols.isEmpty) source else source.dropDuplicates(dedupCols)
      deduped
        .drop("etldate") // dropped here, re-added below with yesterday's value
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", to_timestamp(lit(currentTime)))
        .withColumn("dwd_modify_user", lit("user1"))
        .withColumn("dwd_modify_time", to_timestamp(lit(currentTime)))
        .withColumn("etldate", lit(yesterday))
        .write.format("hive").mode("append")
        .partitionBy("etldate")
        .saveAsTable(s"dwd.${dwdName}")
      println(s"${dwdName}写入完成")
    }

    // Backward-compatible wrappers keeping the original three entry points:
    // de-duplicate on two key columns.
    def to_dwd01(ods_name: String, dwd_name: String, col1: String, col2: String): Unit =
      odsToDwd(ods_name, dwd_name, Seq(col1, col2))

    // De-duplicate on a single key column.
    def to_dwd02(ods_name: String, dwd_name: String, col1: String): Unit =
      odsToDwd(ods_name, dwd_name, Seq(col1))

    // No de-duplication.
    def to_dwd03(ods_name: String, dwd_name: String): Unit =
      odsToDwd(ods_name, dwd_name)

    // Load the four ods tables into dwd.
    to_dwd01("changerecord", "fact_change_record", "changeid", "changemachineid")
    to_dwd02("basemachine", "dim_machine", "basemachineid")
    to_dwd03("producerecord", "fact_produce_record")
    to_dwd03("machinedata", "fact_machine_data")

    spark.close()
  }

}
