package DianShang_2024.ds_06.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, lit}

object trait04 {
  /**
   * Task 4: from the dwd_ds_hudi layer tables, compute the median order amount
   * (final_total_amount) of 2020 orders per province and per region, and append
   * the result to the ClickHouse table `nationmedian`.
   *
   * NOTE(review): the requirement text says database `shtd_result`, but the JDBC
   * URL below targets `shtd_result06` — confirm which is correct before release.
   */
  def main(args: Array[String]): Unit = {

    // Spark session with Hive support and Hudi SQL extensions.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第四题")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // FIX: the Hudi extension class is HoodieSparkSessionExtension; the
      // previously configured "org.apache.spark.sql.hudi.SparkSessionExtension"
      // does not exist and would fail to load.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Shared constants (were repeated three times each): warehouse root on HDFS
    // and the latest etl_date partition to read from every table.
    val warehouseRoot = "hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db"
    val latestEtlDate = "20240101"

    // Register the latest partition of a Hudi table as a temp view.
    def registerLatestPartition(table: String, view: String): Unit =
      spark.read.format("hudi")
        .load(s"$warehouseRoot/$table")
        .where(col("etl_date") === lit(latestEtlDate))
        .createOrReplaceTempView(view)

    registerLatestPartition("fact_order_info", "t1")
    registerLatestPartition("dim_province", "t2")
    registerLatestPartition("dim_region", "t3")

    // percentile(..., 0.5) over a window yields the median per partition.
    // DISTINCT is required: window functions emit one output row per input row,
    // so without it every (province, region) pair would repeat once per order.
    val result = spark.sql(
      """
        |select
        |distinct
        |t1.province_id as provinceid,
        |t2.name as provincename,
        |t3.id as regionid,
        |t3.region_name as regionname,
        |percentile(t1.final_total_amount,0.5) over(partition by t2.id) as provincemedian,
        |percentile(t1.final_total_amount,0.5) over(partition by t3.id) as regionmedian
        |from t1
        |join t2 on t1.province_id=t2.id
        |join t3 on t2.region_id=t3.id
        |where Year(t1.create_time)=2020
        |""".stripMargin)

    // Append the result into ClickHouse via JDBC.
    result.write.format("jdbc")
      .option("url", "jdbc:clickhouse://192.168.40.110:8123/shtd_result06")
      .option("user", "default")
      .option("password", "")
      .option("driver", "com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable", "nationmedian")
      .mode("append")
      .save()

    // Release Spark resources (stop() is the documented shutdown API;
    // close() merely delegates to it).
    spark.stop()
  }

}
