package DianShang_2024.ds_07.clean

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, lit, to_timestamp, when}

import java.text.SimpleDateFormat
import java.util.Calendar

object clean01 {
  /**
   * Dimension-table merge job: pulls the latest partition of
   * `ods07.user_info`, stamps DWD audit columns, and merges it with the
   * current snapshot of `dwd07.dim_user_info`, keeping exactly one (the most
   * recent) record per `id` in yesterday's partition.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据抽取第一题")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd07")

    // Audit timestamp (now) and the target partition value (yesterday, yyyyMMdd).
    val day: Calendar = Calendar.getInstance()
    val current_time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(day.getTime)
    day.add(Calendar.DATE, -1)
    val yesterday = new SimpleDateFormat("yyyyMMdd").format(day.getTime)

    // NOTE: in the competition the partition filter would simply be `yesterday`;
    // max(etl_date) is used here because the locally extracted partition dates differ.
    spark.table("ods07.user_info").createOrReplaceTempView("temp01")
    spark.table("ods07.user_info")
      // BUG FIX: filter on etl_date BEFORE dropping it. The original called
      // .drop("etl_date") first, so the subsequent where("etl_date=...") referenced
      // a column no longer present in the projection and could not be resolved.
      // This also matches the dim_user_info path below (filter in SQL, then drop).
      .where("etl_date=(select max(etl_date) from temp01)")
      .drop("etl_date")
      // Backfill operate_time from create_time for rows that were never updated.
      .withColumn(
        "operate_time",
        when(
          col("operate_time").isNull, col("create_time")
        ).otherwise(col("operate_time")))
      .withColumn("dwd_insert_user", lit("user1"))
      .withColumn("dwd_insert_time", to_timestamp(lit(current_time)))
      .withColumn("dwd_modify_user", lit("user1"))
      .withColumn("dwd_modify_time", to_timestamp(lit(current_time)))
      .createOrReplaceTempView("user_info")

    // Current snapshot of the dimension table (its latest partition only).
    val dwd = spark.sql(
      """
        |select
        |*
        |from dim_user_info
        |where etl_date=(select max(etl_date) from dim_user_info)
        |""".stripMargin)

    dwd
      .drop("etl_date")
      // Same operate_time backfill as the ODS side, so the union is consistent.
      .withColumn(
        "operate_time",
        when(col("operate_time").isNotNull, col("operate_time")).otherwise(col("create_time"))
      )
      .createOrReplaceTempView("dwd")

    // Merge strategy: union the new and existing rows, then per id keep the row
    // with the latest operate_time (row_number = 1), while preserving the earliest
    // dwd_insert_time and the latest dwd_modify_time seen across all versions.
    spark.sql(
      s"""
        |insert into dwd07.dim_user_info partition(etl_date="${yesterday}")
        |select
        |id,login_name,nick_name,passwd,name,phone_num,email,head_img,user_level,birthday,gender,create_time,operate_time,
        |dwd_insert_user,
        |dwd_insert_time,
        |dwd_modify_user,
        |dwd_modify_time
        |from(
        |select
        |id,login_name,nick_name,passwd,name,phone_num,email,head_img,user_level,birthday,gender,create_time,operate_time,
        |dwd_insert_user,
        |min(dwd_insert_time) over(partition by id) as dwd_insert_time,
        |dwd_modify_user,
        |max(dwd_modify_time) over(partition by id) as dwd_modify_time,
        |row_number() over(partition by id order by operate_time desc ) as row
        |from(
        |select * from user_info
        |union all
        |select * from dwd
        |) as r1
        |) as r2
        |where r2.row=1
        |""".stripMargin)

    spark.close()
  }

}
