package ds_industry_2025.ds.ds_06.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
/*
    4、请根据dwd_ds_hudi层的相关表，计算出2020年每个省份所在地区的订单金额的中位数,存入ClickHouse数据库shtd_result的
    nationmedian表中（表结构如下），然后在Linux的ClickHouse命令行中根据地区表主键，省份表主键均为升序排序，查询出前5条，
    将SQL语句复制粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任
    务B提交结果.docx】中对应的任务序号下；
 */
object t4 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t4")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    val region_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/dim_region"
    val province_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/dim_province"
    val order_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/fact_order_info"

    spark.read.format("hudi").load(region_path).createOrReplaceTempView("region")
    spark.read.format("hudi").load(province_path).createOrReplaceTempView("province")
    spark.read.format("hudi").load(order_path).createOrReplaceTempView("order")

    spark.read.format("hudi").load(region_path)
      .where("etl_date=(select max(etl_date) from region)")
      .createOrReplaceTempView("r")
    spark.read.format("hudi").load(province_path)
      .where("etl_date=(select max(etl_date) from province)")
      .createOrReplaceTempView("p")

    spark.read.format("hudi").load(order_path)
      .where("etl_date=(select max(etl_date) from order)")
      .where(year(col("create_time"))=== 2020)
      .createOrReplaceTempView("o")


    val result = spark.sql(
      """
        |select distinct
        |o.province_id as provinceid,
        |p.name  as provincename,
        |p.region_id as regionid,
        |r.region_name as regionname,
        |percentile_approx(o.final_total_amount,0.5)
        |over(partition by p.region_id,r.region_name,o.province_id,p.name) as provincemedian,
        |percentile_approx(o.final_total_amount,0.5) over(partition by p.region_id,r.region_name) as regionmedian
        |from o
        |join p on p.id=o.province_id
        |join r on r.id=p.region_id
        |""".stripMargin)

    result.write.format("jdbc")
      .option("url","jdbc:clickhouse://192.168.40.110:8123/shtd_result")
      .option("user","default")
      .option("password","")
      .option("driver","com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable","nationmedian")
      .mode("append")
      .save()

    // todo select * from nationmedian order by regionid,provinceid limit 5;


    spark.close()
  }

}
