package ds_industry_2025.ds.ds_03.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/*
      4、根据dws层表来计算每个地区2020年订单金额前3省份，依次存入MySQL数据库shtd_result的regiontopthree表中（表结构如下），
      然后在Linux的MySQL命令行中根据地区表主键升序排序，查询出前5条，将SQL语句复制粘贴至客户端桌面【Release\任务B提交结果.docx】
      中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下；
 */
/**
 * Task 4: for each region, find the top-3 provinces by total 2020 order
 * amount and emit one row per region with comma-joined province ids, names
 * and amounts (descending by amount). Per the task the result belongs in
 * MySQL `shtd_result.regiontopthree`; currently it is only shown on stdout.
 */
object t5 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t5")
      // allow dynamic-partition writes without naming a static partition
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Latest ETL snapshot of each dimension table.
    spark.table("dwd.dim_province")
      .where("etl_date=(select max(etl_date) from dwd.dim_province)")
      .createOrReplaceTempView("p")

    spark.table("dwd.dim_region")
      .where("etl_date=(select max(etl_date) from dwd.dim_region)")
      .createOrReplaceTempView("r")

    // Only orders created in 2020 are in scope.
    spark.table("dwd.fact_order_info")
      .where(year(col("create_time")) === 2020)
      .createOrReplaceTempView("o")

    // Per-province 2020 totals, ranked within each region.
    // A plain GROUP BY replaces the original DISTINCT + windowed SUM, which
    // computed the same totals in a more convoluted (and costlier) way.
    val ranked = spark.sql(
      """
        |select
        |  region_id, region_name, province_id, province_name, total_amount,
        |  row_number() over (partition by region_id order by total_amount desc) as rn
        |from (
        |  select
        |    p.region_id,
        |    r.region_name,
        |    o.province_id,
        |    p.name as province_name,
        |    cast(round(sum(o.final_total_amount)) as bigint) as total_amount
        |  from o
        |  join p on p.id = o.province_id
        |  join r on r.id = p.region_id
        |  group by p.region_id, r.region_name, o.province_id, p.name
        |) t
        |""".stripMargin)
      .where(col("rn") <= 3) // keep the top 3 provinces of each region

    // collect_list gives NO ordering guarantee, so carry the rank inside a
    // struct and sort_array on it: the three joined strings are then
    // deterministic, mutually aligned, and in descending-amount order.
    val result = ranked
      .groupBy("region_id", "region_name")
      .agg(
        sort_array(
          collect_list(
            struct(
              col("rn"), // first struct field => sort key
              col("province_id").cast("string").as("pid"),
              col("province_name").as("pname"),
              col("total_amount").cast("string").as("pamt")
            )
          )
        ).as("tops")
      )
      .select(
        col("region_id"),
        col("region_name"),
        concat_ws(",", col("tops.pid")).as("province_ids"),
        concat_ws(",", col("tops.pname")).as("province_names"),
        concat_ws(",", col("tops.pamt")).as("province_amounts")
      )

    result.show(false)
    // TODO: per the task, persist `result` to MySQL shtd_result.regiontopthree
    // (result.write.format("jdbc") with the grader-provided connection info).

    spark.close()
  }
}
