package DianShang_2024.ds_03.indicator

import org.apache.spark.sql.SparkSession

object trait02 {
  /**
   * Task 2 (indicator computation):
   * From the dwd-layer fact/dim tables, compute per-province monthly order count
   * and total order amount, rank provinces within each (year, month, region_id)
   * group by total_amount descending (the `sequence` column), and write the
   * result into Hive table dws03.province_consumption_day_aggr, partitioned by
   * (year, month). The follow-up top-5 query is executed separately in hive cli.
   */
  def main(args: Array[String]): Unit = {
    // Build a local SparkSession with Hive support.
    // nonstrict dynamic-partition mode is required because the INSERT below
    // supplies the (year, month) partition values dynamically from the data.
    val spark=SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第二题")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd03")

    // Recreate the target table in the dws database. year/month appear in the
    // column list and are moved to partition columns by the PARTITIONED BY
    // clause (Spark 3 unified CREATE TABLE syntax).
    spark.sql("drop table if exists dws03.province_consumption_day_aggr")
    spark.sql(
      """
        |create table if not exists dws03.province_consumption_day_aggr(
        |province_id int,
        |province_name string,
        |region_id int,
        |region_name string,
        |total_amount decimal(16,10),
        |total_count int,
        |sequence int,
        |year int,
        |month int
        |)
        |partitioned by(year,month)
        |""".stripMargin)

    // Register deduplicated views over the three dwd tables, keeping only the
    // latest etl_date partition. `distinct` removes exact-duplicate rows.
    spark.sql(
      """
        |select
        |*
        |from(
        |select distinct * from dwd03.fact_order_info
        |where etl_date='20240101'
        |) as t1
        |""".stripMargin).createOrReplaceTempView("fact_order_info")

    spark.sql(
      """
        |select
        |*
        |from(
        |select distinct * from dwd03.dim_province
        |where etl_date='20240101'
        |) as t1
        |""".stripMargin).createOrReplaceTempView("dim_province")

    spark.sql(
      """
        |select
        |*
        |from(
        |select distinct * from dwd03.dim_region
        |where etl_date='20240101'
        |) as t1
        |""".stripMargin).createOrReplaceTempView("dim_region")

    // Join the three views and aggregate.
    //   inner layer: one row per order with its province, region, year, month, amount
    //   middle layer: order count and total amount per (province, region, year, month)
    //   outer layer: row_number() ranks provinces inside each (year, month, region)
    //               by total_amount desc, producing `sequence`
    // substring() is 1-based: substring(s, start, length).
    // BUGFIX: month was extracted with substring(create_time,5,2), which for the
    // 'yyyy-MM-dd HH:mm:ss' format yields "-0" (index 5 is the hyphen). The month
    // digits start at 1-based index 6 — TODO confirm create_time format against
    // the dwd schema.
    spark.sql(
      """
        |select
        |province_id,
        |province_name,
        |cast(region_id as int),
        |region_name,
        |total_amount,
        |total_count,
        |row_number() over(partition by year,month,region_id order by total_amount desc ) as sequence,
        |cast(year as  int),
        |cast(month as int)
        |from(
        |select
        |province_id,
        |province_name,
        |region_id,
        |region_name,
        |year,
        |month,
        |count(*) as total_count,
        |sum(money) as total_amount
        |from(
        |select
        |t1.province_id as province_id,
        |t2.name as province_name,
        |t1.final_total_amount as money,
        |substring(t1.create_time,1,4) as year,
        |substring(t1.create_time,6,2) as month,
        |t2.region_id as region_id,
        |t3.region_name as region_name
        |from fact_order_info as t1
        |join dim_province  as t2   on t1.province_id=t2.id
        |join dim_region as t3      on  t2.region_id=t3.id
        |) as t1
        |group by province_id,province_name,region_id,region_name,year,month
        |) as t2
        |""".stripMargin).createOrReplaceTempView("result")

    // Debug: sanity-check the extracted year values before writing.
    spark.sql("select year from result").show

    // Dynamic-partition insert: the trailing year/month columns of `result`
    // supply the partition values (position-based matching).
    spark.sql(
      """
        |insert into table dws03.province_consumption_day_aggr
        |partition(year,month)
        |select * from  result
        |""".stripMargin)


    // Release Spark resources.
    spark.close()
  }

}
