package com.shujia.dim

import com.shujia.common.SparkTool
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object DimUsertagMskDay extends SparkTool {

  /** Salt appended to sensitive values before hashing, so the raw MD5 of a
    * bare phone number / name / id cannot be reversed via rainbow tables. */
  private val Salt = "shujia"

  /**
   * Builds the daily masked user-tag dimension table.
   *
   * Reads `ods.ods_usertag_d` for the current `day_id` (partition pruning),
   * masks the sensitive columns (`mdn`, `name`, `id_number`) with a salted,
   * upper-cased MD5, writes the result as CSV into the dim table's partition
   * directory, and finally registers that partition in the metastore.
   *
   * @param spark the active session supplied by [[SparkTool]]
   */
  override def run(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Salted, upper-cased MD5 mask for one sensitive column; keeps the
    // original column name so downstream schemas are unchanged.
    // lit(...) is the idiomatic literal — avoids the stringly-typed expr("'...'").
    def mask(colName: String) =
      upper(md5(concat(col(colName), lit(Salt)))) as colName

    val userTagDF: DataFrame = spark.table("ods.ods_usertag_d")

    userTagDF
      // Partition pruning: only scan the requested day.
      .where($"day_id" === day_id)
      // Mask sensitive fields; pass the remaining columns through unchanged.
      .select(
        mask("mdn")
        , mask("name")
        , $"gender"
        , $"age"
        , mask("id_number")
        , $"number_attr"
        , $"trmnl_brand"
        , $"trmnl_price"
        , $"packg"
        , $"conpot"
        , $"resi_grid_id"
        , $"resi_county_id")
      // Persist into the target partition path, replacing any prior run.
      .write
      .mode(SaveMode.Overwrite)
      .format("csv")
      .option("sep", ",")
      .save(s"/daas/motl/dim/dim_usertag_msk_d/day_id=$day_id")

    // Register the new partition so Hive/Spark SQL can see the files just written.
    spark
      .sql(
        s"""
           |alter table dim.dim_usertag_msk_d add if not exists partition (day_id=$day_id)
           |""".stripMargin)

  }

  /**
   * spark-submit --master yarn-client --class com.shujia.dim.DimUsertagMskDay --jars common-1.0.jar dim-1.0.jar 20220815
   */
}
