package com.ctyun.dim

import com.shujia.utils.SparkTool
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * DIM-layer job: builds `dim.dim_usertag_msk_d` from the ODS user-profile
 * table for a single day, masking sensitive columns before they leave ODS.
 *
 * Masking is a one-way transform: MD5 of (value + fixed salt), upper-cased.
 * `day_id` is inherited from [[SparkTool]] — assumed to be the target
 * partition date supplied by the scheduler.
 */
object DimUsertagMskDay extends SparkTool {
  override def run(spark: SparkSession): Unit = {
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // One-way mask for PII columns: salted MD5, rendered in upper case.
    // NOTE: concat yields null when the input column is null, so null
    // values stay null after masking — same behavior as the original expr form.
    def mask(c: org.apache.spark.sql.Column): org.apache.spark.sql.Column =
      upper(md5(concat(c, lit("shujia"))))

    // Read the ODS user-profile table, keep only the requested day's
    // partition, and mask the sensitive identifier columns.
    val maskedDF: DataFrame = spark
      .table("ods.ods_usertag_d")
      .where($"day_id" === day_id)
      .select(
        mask($"mdn").as("mdn"),
        mask($"name").as("name"),
        $"gender",
        $"age",
        mask($"id_number").as("id_number"),
        $"number_attr",
        $"trmnl_brand",
        $"trmnl_price",
        $"packg",
        $"conpot",
        $"resi_grid_id",
        $"resi_county_id"
      )

    // Write the masked rows as tab-separated text into the day's partition
    // directory, replacing any previous output for that day.
    maskedDF
      .write
      .format("csv")
      .option("sep", "\t")
      .mode(SaveMode.Overwrite)
      .save(s"/daas/motl/dim/dim_usertag_msk_d/day_id=$day_id")

    // Register the freshly written partition in the Hive metastore so the
    // data becomes visible to SQL queries (no-op if it already exists).
    spark.sql(
      s"""
         |alter table dim.dim_usertag_msk_d  add if not exists partition(day_id='$day_id')
         |""".stripMargin)

  }
}
