package ods_industry_2024.ods_02.indicator_count_hudi.indicator_02

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession

object test_02 {
  /**
   * Indicator task 2: from the latest dwd-layer partitions, aggregate each
   * user's daily order count and daily order total amount, then write the
   * result into the Hudi table hudi_indicator.user_consumption_day_aggr_02.
   *
   * Target schema (from the task statement):
   *   uuid         string  random value, primary key (uniqueness only)
   *   user_id      int     user primary key
   *   user_name    string  user name
   *   total_amount double  total order amount for that user on that day
   *   total_count  int     order count for that user on that day; also the
   *                        preCombine field (meaningless for merging since
   *                        the record key is random)
   *   year/month/day int   dynamic partition fields derived from create_time
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("第二套卷子指标第二题")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Keep only the newest etl_date partition of the fact table.
    spark.table("dwd_ds_hudi_02.fact_order_info").createOrReplaceTempView("temp01")
    spark.table("dwd_ds_hudi_02.fact_order_info")
      .where("etl_date=(select max(etl_date) from temp01) ")
      .distinct()
      .createOrReplaceTempView("order_info")

    // Keep only the newest etl_date partition of the user dimension.
    spark.table("dwd_ds_hudi_02.dim_user_info").createOrReplaceTempView("temp02")
    spark.table("dwd_ds_hudi_02.dim_user_info")
      .where("etl_date=(select max(etl_date) from temp02)")
      .distinct()
      .createOrReplaceTempView("user_info")

    // GROUP BY yields exactly one row per (user, day), as the task requires.
    // The original window-function version emitted one row per ORDER, so a
    // user with several orders on the same day produced duplicate aggregate
    // rows (each with a different random uuid, so distinct() could not
    // collapse them). count(*) is cast to int to match the declared column
    // type of total_count.
    // NOTE(review): to_date with pattern "yyyyMMdd" assumes create_time is
    // stored as e.g. "20240101" — confirm against the dwd table's format.
    val result = spark.sql(
      """
        |select
        |uuid() as uuid,
        |o.user_id,
        |u.name as user_name,
        |sum(o.final_total_amount) as total_amount,
        |cast(count(*) as int) as total_count,
        |Year(to_date(o.create_time,"yyyyMMdd")) as year,
        |Month(to_date(o.create_time,"yyyyMMdd")) as month,
        |Day(to_date(o.create_time,"yyyyMMdd")) as day
        |from order_info as o
        |join user_info as u
        |on u.id=o.user_id
        |group by
        |o.user_id,
        |u.name,
        |Year(to_date(o.create_time,"yyyyMMdd")),
        |Month(to_date(o.create_time,"yyyyMMdd")),
        |Day(to_date(o.create_time,"yyyyMMdd"))
        |""".stripMargin)

    result.show

    // Recreate the target Hudi table (COW, uuid as record key,
    // total_count as preCombine field, partitioned by year/month/day).
    spark.sql("use hudi_indicator")
    spark.sql("drop table if exists user_consumption_day_aggr_02")
    spark.sql(
      """
        |create table if not exists user_consumption_day_aggr_02(
        |uuid string,
        |user_id int,
        |user_name string,
        |total_amount double,
        |total_count int
        |)using hudi
        |tblproperties(
        |type="cow",
        |primaryKey="uuid",
        |preCombineField="total_count",
        |hoodie.datasource.hive_sync.mode="hms"
        |)
        |partitioned by(year int,month int,day int)
        |""".stripMargin)
    // Fixed typo: the property was "hoodie.datasource.hive_aync.mode",
    // which Hudi does not recognize; the real option is hive_sync.

    result.write.mode("append")
      .format("hudi")
      .options(getQuickstartWriteConfigs)
      .option(RECORDKEY_FIELD.key(), "uuid")
      .option(PRECOMBINE_FIELD.key(), "total_count")
      .option(PARTITIONPATH_FIELD.key(), "year,month,day")
      .option("hoodie.table.name", "user_consumption_day_aggr_02")
      .save("hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_indicator.db/user_consumption_day_aggr_02")

    println("完成")

    spark.close()
  }

}
