package DianShang_2024.ds_07.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object indicator02 {
  /**
   * Indicator 2: per-province monthly order count and total order amount.
   *
   * Task (translated): from the DWD-layer tables, compute for each province
   * and each month the number of orders and the total order amount. Rank
   * provinces by total_amount (descending) within each (year, month,
   * region_id) group to produce a `sequence` value, and write the result to
   * the Hive table dws07.province_consumption_day_aggr07, partitioned by
   * (year, month). (The top-5 verification query is run separately via the
   * Hive CLI.)
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第二题")
      // Allow dynamic (year, month) partition values on insert.
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd07")

    // A previous ETL run wrote all-null create_time values, so filter those
    // rows out explicitly in addition to taking the latest etl_date partition.
    spark.table("dwd07.fact_order_info")
      .where(col("create_time").isNotNull)
      .where("etl_date=(select max(etl_date) from fact_order_info)")
      .dropDuplicates()
      .createOrReplaceTempView("order_info")

    // Latest snapshot of each dimension table, de-duplicated.
    spark.table("dwd07.dim_province")
      .where("etl_date=(select max(etl_date) from dim_province)")
      .dropDuplicates()
      .createOrReplaceTempView("province")

    spark.table("dwd07.dim_region")
      .where("etl_date=(select max(etl_date) from dim_region)")
      .dropDuplicates()
      .createOrReplaceTempView("region")

    // Inner query: one distinct row per (province, year, month) carrying the
    // windowed sum/count over that group. Outer query: rank provinces by
    // total_amount (desc) within each (year, month, region).
    // NOTE(review): year()/month() are applied to create_time directly —
    // equivalent to the former year(date_format(create_time,'yyyy-MM'))
    // round-trip for any parseable create_time, and both are null-safe.
    spark.sql(
      """
        |select
        |r1.province_id,r1.province_name,
        |cast(r1.region_id as int),
        |r1.region_name,total_amount,total_count,
        |row_number() over(partition by r1.year,r1.month,r1.region_id order by r1.total_amount desc) as sequence,
        |year,month
        |from(
        |select distinct
        |o.province_id,
        |p.name as province_name,
        |r.id as region_id,
        |r.region_name,
        |sum(final_total_amount)
        |over(partition by o.province_id,year(o.create_time),month(o.create_time)) as total_amount,
        |count(*)
        |over(partition by o.province_id,year(o.create_time),month(o.create_time)) as total_count,
        |year(o.create_time) as year,
        |month(o.create_time) as month
        |from order_info as o
        |join province as p
        |on p.id=o.province_id
        |join region as r
        |on r.id=p.region_id
        |) as r1
        |""".stripMargin).createOrReplaceTempView("result")

    // Recreate the target table from scratch; the table was just dropped, so
    // an unconditional create is sufficient.
    spark.sql("drop table if exists dws07.province_consumption_day_aggr07")
    spark.sql(
      """
        |create table dws07.province_consumption_day_aggr07(
        |province_id int,
        |province_name string,
        |region_id int,
        |region_name string,
        |total_amount double,
        |total_count int,
        |sequence int
        |)
        |partitioned by(year int,month int)
        |""".stripMargin)

    // Dynamic-partition insert: the trailing year/month columns of `result`
    // supply the partition values.
    spark.sql(
      """
        |insert into table dws07.province_consumption_day_aggr07
        |partition(year,month)
        |select * from result
        |""".stripMargin)

    spark.close()
  }

}
