package ds_industry_2025.industry.gy_08.T2

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import java.text.SimpleDateFormat
import java.util.Calendar
//  todo 数据清洗
/**
 * ODS → DWD data-cleaning job.
 *
 * For each source table in the `ods` database this job:
 *   1. optionally deduplicates rows on a caller-supplied key column set,
 *   2. replaces the `etldate` partition column with yesterday's date (yyyyMMdd),
 *   3. adds the four standard audit columns (insert/modify user and time),
 *   4. appends the result into the corresponding `dwd` table, partitioned by `etldate`.
 */
object t2_count {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Capture the job start time once so every table gets identical audit timestamps,
    // and compute yesterday's date for the target partition value.
    val day = Calendar.getInstance()
    val currentTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(day.getTime)
    day.add(Calendar.DATE, -1)
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)

    /**
     * Copies one ODS table into DWD with audit columns and a fresh `etldate` partition.
     *
     * @param odsName   source table name in the `ods` database
     * @param dwdName   target table name in the `dwd` database
     * @param dedupCols columns forming the deduplication key; when empty (the default)
     *                  no deduplication is performed — this generalizes the previous
     *                  three near-identical to_dwd/to_dwd02/to_dwd03 helpers
     */
    def toDwd(odsName: String, dwdName: String, dedupCols: Seq[String] = Nil): Unit = {
      val source = spark.table(s"ods.${odsName}")
      val deduped = if (dedupCols.isEmpty) source else source.dropDuplicates(dedupCols)
      deduped
        // Drop the source partition column so it can be re-added with yesterday's value.
        .drop("etldate")
        .withColumn("dwd_insert_user", lit("user1"))
        .withColumn("dwd_insert_time", to_timestamp(lit(currentTime)))
        .withColumn("dwd_modify_user", lit("user1"))
        .withColumn("dwd_modify_time", to_timestamp(lit(currentTime)))
        .withColumn("etldate", lit(yesterday))
        .write.format("hive").mode("append")
        .partitionBy("etldate")
        .saveAsTable(s"dwd.${dwdName}")
      println(s"${dwdName}表格写入完成")
    }

    // Load each table; only changerecord and basemachine require deduplication.
    toDwd("environmentdata", "fact_environment_data")
    toDwd("changerecord", "fact_change_record", Seq("changeid", "changemachineid"))
    toDwd("basemachine", "dim_machine", Seq("basemachineid"))
    toDwd("producerecord", "fact_produce_record")
    toDwd("machinedata", "fact_machine_data")

    spark.close()
  }

}
