package DianShang_2024.ds_06.indicator

import org.apache.commons.lang.RandomStringUtils
import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

object trait02 {
  def main(args: Array[String]): Unit = {
    /*
          Task 2: Using the dwd_ds_hudi layer tables, compute per-province monthly
          order count and total order amount. Group by year, month, region_id and
          rank by total_amount descending to produce a `sequence` value. Write the
          result into the Hudi table dws_ds_hudi.province_consumption_day_aggr.
          Afterwards (outside this job) spark-shell is used to query the top 5 rows
          ordered by total_count, total_amount and province_id descending, casting
          total_amount to bigint to avoid scientific notation in the output.
     */
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第二题")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    //  HDFS paths of the dwd-layer Hudi tables this task needs
    //  (chosen by looking at which fields the required output uses)
    val fact_order_info_path = "hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/fact_order_info"
    val dim_province_path = "hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/dim_province"
    val dim_region_path = "hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/dim_region"

    //  Load the three tables (latest etl_date partition) and register temp views
    spark.read.format("hudi").load(fact_order_info_path)
      .where(col("etl_date") === "20240101")
      .createOrReplaceTempView("t1")

    spark.read.format("hudi").load(dim_province_path)
      .where(col("etl_date") === "20240101")
      .createOrReplaceTempView("t2")

    spark.read.format("hudi").load(dim_region_path)
      .where(col("etl_date") === "20240101")
      .createOrReplaceTempView("t3")

    //  Register a UDF that generates a real UUID string for each row.
    //  BUGFIX: the original used RandomStringUtils.randomAlphanumeric(1), which
    //  produced a single random character — not a UUID, and virtually guaranteed
    //  to collide when used as a Hudi record key. java.util.UUID is used instead.
    //  (Note: the SQL below uses the built-in uuid() function, so this UDF is a
    //  fallback kept for compatibility with any external SQL that may call it.)
    spark.udf.register("generate_uuid", () => {
      java.util.UUID.randomUUID().toString
    })

    //  Metric computation. Views: t1 = fact_order_info, t2 = dim_province, t3 = dim_region

    //  temp01: join the three tables and project the raw fields needed downstream
    spark.sql(
      """
        |select
        |final_total_amount,
        |t1.province_id as province_id,
        |t2.name as province_name,
        |t3.id as region_id,
        |t3.region_name as region_name,
        |Year(t1.create_time) as year,
        |Month(t1.create_time) as month
        |from t1
        |join t2 on t1.province_id=t2.id
        |join t3 on t2.region_id=t3.id
        |""".stripMargin).createOrReplaceTempView("temp01")

    //  Aggregate per province per year/month: order count and total amount.
    //  row_number() over (partition by year, month, region_id order by total desc)
    //  yields the required `sequence` rank. The built-in uuid() supplies the key
    //  (replacing the registered UDF above).
    spark.sql(
      """
        |select
        |uuid() as uuid,
        |province_id,
        |province_name,
        |region_id,
        |region_name,
        |sum(final_total_amount) as total_amount,
        |count(*) as total_count,
        |row_number() over(partition by year,month,region_id order by sum(final_total_amount) desc ) as sequence,
        |year,
        |month
        |from temp01
        |group by province_id,province_name,region_id,region_name,year,month
        |""".stripMargin).createOrReplaceTempView("result")

    //  Materialize the result as a DataFrame for writing
    val result = spark.sql("select * from result")

    //  Recreate the target Hudi table province_consumption_day_aggr
    spark.sql("use dws_ds_hudi")
    spark.sql("drop table if exists province_consumption_day_aggr")
    //  BUGFIX: the original tblproperties key was misspelled
    //  "hoodie.datasource.hive_aync.mode" — Hudi silently ignores unknown keys,
    //  so hive sync via HMS was never configured. Corrected to hive_sync.
    spark.sql(
      """
        |create table if not exists  province_consumption_day_aggr(
        |uuid String,
        |province_id int,
        |province_name String,
        |region_id int,
        |region_name String,
        |total_amount decimal(16,8),
        |total_count int,
        |sequence int,
        |year int,
        |month int
        |)using hudi
        |tblproperties(
        |type="cow",
        |primaryKey="uuid",
        |preCombineField="total_count",
        |hoodie.datasource.hive_sync.mode="hms"
        |)
        |partitioned by(year,month)
        |""".stripMargin)

    //  Write the result into the target Hudi table path.
    //  region_id is cast to int to match the declared table schema.
    result.withColumn("region_id", col("region_id").cast("int"))
      .write.mode("append").format("hudi")
      .options(getQuickstartWriteConfigs)
      .option(PRECOMBINE_FIELD.key(), "total_count")
      .option(RECORDKEY_FIELD.key(), "uuid")
      .option(PARTITIONPATH_FIELD.key(), "year,month")
      .option("hoodie.table.name", "province_consumption_day_aggr")
      .save("hdfs://192.168.40.110:9000/user/hive/warehouse/dws_ds_hudi.db/province_consumption_day_aggr")

    //  Release the Spark session
    spark.close()
  }

}
