package ds_industry_2025.ds.ds_03.T3

import org.apache.spark.sql.SparkSession
/*
    2、根据dwd层表统计每个省每月下单的数量和下单的总金额，并按照year，month，region_id进行分组,按照total_amount降序排序，形成
    sequence值，将计算结果存入Hive的dws数据库province_consumption_day_aggr表中（表结构如下），然后使用hive cli根据订单总
    数、订单总金额、省份表主键均为降序排序，查询出前5条，在查询时对于订单总金额字段将其转为bigint类型（避免用科学计数法展示），
    将SQL语句复制粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任
    务B提交结果.docx】中对应的任务序号下;
 */
object t2 {

  /**
   * Aggregates order counts and order amounts per province per month from the
   * dwd layer, ranks provinces inside each (year, month, region) partition by
   * total amount descending (`sequence`), and writes the result to
   * `dws.province_consumption_day_aggr`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    try {
      // Only the most recent etl_date snapshot of each dwd table is relevant.
      registerLatestSnapshot(spark, "dwd.fact_order_info", "o")
      registerLatestSnapshot(spark, "dwd.dim_region", "r")
      registerLatestSnapshot(spark, "dwd.dim_province", "p")

      // A plain GROUP BY replaces the original distinct + windowed-aggregate
      // emulation: one row per (year, month, region, province) with its sum and
      // count, then row_number ranks provinces within each (year, month, region)
      // partition by total_amount descending.
      val result = spark.sql(
        """
          |select
          |province_id,province_name,
          |region_id,region_name,
          |total_amount,total_count,
          |row_number() over(partition by year,month,region_id,region_name order by total_amount desc) as sequence,
          |year,month
          |from(
          |select
          |o.province_id,
          |p.name as province_name,
          |p.region_id,
          |r.region_name,
          |sum(o.final_total_amount) as total_amount,
          |count(*) as total_count,
          |year(o.create_time) as year,
          |month(o.create_time) as month
          |from o
          |join p on p.id=o.province_id
          |join r on r.id=p.region_id
          |group by o.province_id,p.name,p.region_id,r.region_name,year(o.create_time),month(o.create_time)
          |) as r1
          |""".stripMargin)

      spark.sql("create database if not exists dws")

      result.write.mode("overwrite").format("hive")
        .saveAsTable("dws.province_consumption_day_aggr")

      // Hive CLI verification query (run manually; paste result into the docx):
      //   select province_id,province_name,region_id,region_name,
      //          cast(total_amount as bigint),total_count,sequence,year,month
      //   from dws.province_consumption_day_aggr
      //   order by total_count desc,total_amount desc,province_id desc limit 5
    } finally {
      // Release the session even when the job fails part-way.
      spark.close()
    }
  }

  /**
   * Registers a temp view named `view` holding only the rows of `table` that
   * belong to its latest etl_date partition.
   */
  private def registerLatestSnapshot(spark: SparkSession, table: String, view: String): Unit =
    spark.table(table)
      .where(s"etl_date=(select max(etl_date) from $table)")
      .createOrReplaceTempView(view)
}
