package com.shujia.dim

import com.shujia.common.utils.SparkMain
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

object DimUsertagMskDay extends SparkMain {
  override def run(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 从ODS层加载用户画像表中的数据
    val userTagDay: DataFrame = spark.table("ods.ods_usertag_d").where($"day_id" === dayId)

    // 对用户画像数据中的手机号、姓名、身份证号做一个脱敏处理，最终将结果保存到dim层中
    userTagDay
      .select(
        upper(md5(concat($"mdn", expr("'shujia'"))))
        , upper(md5(concat($"name", expr("'shujia'"))))
        , $"gender"
        , $"age"
        , upper(md5(concat($"id_number", expr("'shujia'"))))
        , $"number_attr"
        , $"trmnl_brand"
        , $"trmnl_price"
        , $"packg"
        , $"conpot"
        , $"resi_grid_id"
        , $"resi_county_id"
      )
      // 保存数据
      .write
      .format("csv")
      .option("sep", ",")
      .mode(SaveMode.Overwrite)
      .save(s"/daas/motl/dim/dim_usertag_msk_d/day_id=$dayId")

    // 增加分区
    spark.sql(
      s"""
         |alter table dim.dim_usertag_msk_d add if not exists partition(day_id='$dayId')
         |""".stripMargin)


  }

  /**
   * 提交命令：
   * spark-submit --master yarn-client --conf spark.sql.shuffle.partitions=10 --class com.shujia.dim.DimUsertagMskDay --jars common-1.0.jar dim-1.0.jar 20230105
   */
}
