package DianShang_2024.ds_06.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, lit}

object trait03 {
  def main(args: Array[String]): Unit = {
    /*
          3、请根据dwd_ds_hudi层的相关表，计算2020年销售量前10的商品，销售额前10的商品，存入ClickHouse数据库shtd_result的topten表中（
          表结构如下），然后在Linux的ClickHouse命令行中根据排名升序排序，查询出前5条，将SQL语句复制粘贴至客户端桌面【Release\任务B提交结
          果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下;
     */
    //  准备spark集成hudi的环境
    val spark= SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第三题")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.SparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    //  准备fact_order_detail表在hdfs上面的路径
    val fact_order_detail_path="hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/fact_order_detail"

    //  拿到最新的数据
     spark.read.format("hudi").load(fact_order_detail_path)
       .where(col("etl_date")===lit("20240101"))
       .createOrReplaceTempView("temp01")

    //  首先将2020年销售量前10的数据分析出来
    spark.sql(
      """
        |select
        |sku_id,
        |sku_name,
        |sum(sku_num) as number,
        |row_number() over(order by sum(sku_num) desc ) as row
        |from temp01
        |where Year(create_time)=2020
        |group by sku_id,sku_name
        |order by number desc
        |limit 10
        |""".stripMargin).createOrReplaceTempView("tb1")


    //  然后将2020年销售额前10的数据分析出来 sell:销售
    //  不知道这里为什么这下面在分组后面加上order by sum(sku_num) * order_price desc就会报错，但是不加也会按照row排序，所以不加也不影响
    spark.sql(
      """
        |select
        |sku_id,
        |sku_name,
        |sum(sku_num) * order_price as sell_value,
        |row_number() over(order by sum(sku_num) * order_price desc) as row
        |from temp01
        |where Year(create_time)=2020
        |group by sku_id,sku_name,order_price
        |limit 10
        |""".stripMargin).createOrReplaceTempView("tb2")

    //  将两张表根据row连接起来
    val result=spark.sql(
      """
        |select
        |tb1.sku_id as topquantityid,
        |tb1.sku_name as topquantityname,
        |tb1.number as topquantity,
        |tb2.sku_id as toppriceid,
        |tb2.sku_name as toppricename,
        |tb2.sell_value as topprice,
        |tb1.row as sequence
        |from tb1
        |join tb2 on tb1.row=tb2.row
        |""".stripMargin)

    //  将数据写入clickhouse
  result.write.format("jdbc")
    .option("url","jdbc:clickhouse://192.168.40.110:8123/shtd_result06")
    .option("user","default")
    .option("password","")
    .option("driver","com.clickhouse.jdbc.ClickHouseDriver")
    .option("dbtable","topten")
    .mode("append")
    .save()

    //  关闭环境
    spark.close()
  }

}
