package DianShang_2024.ds_02.indicator



import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.functions.substring
object indicator04 {
  def main(args: Array[String]): Unit = {
    /*
        请根据dws_ds_hudi库中的表计算出每个省份2020年4月的平均订单金额和该省所在地区平均订单金额相比较结果（“高/低/相同”）,存入ClickHouse数据
        库shtd_result的provinceavgcmpregion表中（表结构如下），然后在Linux的ClickHouse命令行中根据省份表主键、省平均订单金额、地区平均订
        单金额均为降序排序，查询出前5条，将SQL语句复制粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户
        端桌面【Release\任务B提交结果.docx】中对应的任务序号下
     */
    val spark=SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第四题")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()



    //  这里题目要求是从dws层，但是我从dwd层去做了，所以下面是dwd层的做法

    val order_info_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi02.db/fact_order_info"
    val province_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi02.db/dim_province"
    val region_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi02.db/dim_region"

    val temp01=spark.read.format("hudi").load(order_info_path)
    temp01.createOrReplaceTempView("temp01")
    spark.read.format("hudi").load(order_info_path)
      .where("etl_date=(select max(etl_date) from temp01)")
      .dropDuplicates()
      .where(
        substring(col("create_time"),1,6)==="202004"
      )
      .createOrReplaceTempView("order_info")


    val temp02=spark.read.format("hudi").load(province_path)
    temp02.createOrReplaceTempView("temp02")
    spark.read.format("hudi").load(province_path)
      .where("etl_date=(select max(etl_date) from temp02)")
      .dropDuplicates()
      .createOrReplaceTempView("province")

    val temp03=spark.read.format("hudi").load(region_path)
    temp03.createOrReplaceTempView("temp03")
    spark.read.format("hudi").load(region_path)
      .where("etl_date=(select max(etl_date) from temp03)")
      .dropDuplicates()
      .createOrReplaceTempView("region")


    spark.sql(
      """
        |select   distinct
        |*
        |from(
        |select
        |provinceid,
        |provincename,
        |provinceavgconsumption,
        |regionid,
        |regionname,
        |regionavgconsumpion,
        |case
        |  when provinceavgconsumption > regionavgconsumpion then "高"
        |  when provinceavgconsumption < regionavgconsumpion then "低"
        |  else "相同"
        |end
        | as comparison
        |from(
        |select
        |t1.province_id as provinceid,
        |t2.name as provincename,
        |round(avg(t1.final_total_amount) over(partition by t1.province_id,t2.name),2) as provinceavgconsumption,
        |t3.id  as regionid,
        |t3.region_name as regionname,
        |round(avg(t1.final_total_amount) over(partition by t3.id,t3.region_name),2) as regionavgconsumpion
        |from order_info as t1
        |join province as t2
        |on t2.id=t1.province_id
        |join region as t3
        |on t3.id=t2.region_id
        |) as r1
        |) as r2
        |""".stripMargin).show()






    //  下面是dws层的做法
    val dws_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dws_ds_hudi02.db/province_consumption_day_aggr"

    spark.read.format("hudi").load(dws_path)
      .where(col("year")===2020 and col("month")===4)
      .dropDuplicates()
      .createOrReplaceTempView("dws")

    spark.sql(
      """
        |select
        |region_id,
        |province_id,
        |province_name,
        |ceil(total_amount/total_count) as  province_avg
        |from dws
        |""".stripMargin).createOrReplaceTempView("r1")


    spark.sql(
      """
        |select
        |region_id,
        |region_name,
        |ceil(sum(total_amount)/sum(total_count)) as region_avg
        |from dws
        |group by region_id,region_name
        |""".stripMargin).createOrReplaceTempView("r2")

    val result=spark.sql(
      """
        |select
        |r1.province_id as provinceid,
        |r1.province_name as provincename,
        |r1.province_avg as provinceavgconsumption,
        |r2.region_id as regionid,
        |r2.region_name as regionname,
        |r2.region_avg as regionavgconsumption,
        |case
        |when r1.province_avg > r2.region_avg then "高"
        |when r1.province_avg < r2.region_avg then "低"
        |else "相同"
        |end as comparison
        |from r1
        |join r2
        |on r2.region_id=r1.region_id
        |""".stripMargin)

    //  这里为了区分其他卷子的数据库，将这题的结果写到shtd_result_hudi02数据库(在写入数据之前，clickhouse的表需要提前创建好)
    result.write.format("jdbc")
      .option("url","jdbc:clickhouse://192.168.40.110:8123/shtd_result_hudi02")
      .option("user","default")
      .option("password","")
      .option("driver","com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable","provinceavgcmpregion")
      .mode("append")
      .save()

    println("写入完成")











    spark.close()
  }

}
