package ds_industry_2025.ds.ds_06.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/*
      3、请根据dwd_ds_hudi层的相关表，计算2020年销售量前10的商品，销售额前10的商品，存入ClickHouse数据库shtd_result
      的topten表中（表结构如下），然后在Linux的ClickHouse命令行中根据排名升序排序，查询出前5条，将SQL语句复制粘贴至客户端
      桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对
      应的任务序号下;
 */
object t7 {
  /**
   * Computes the 2020 top-10 products by sales quantity and by sales amount
   * from dwd_ds_hudi.fact_order_detail (Hudi table on HDFS).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t1")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // FIX: original value was "org.apache.spark.sql.hdui.HoodieSparKSessionExtension"
      // (package typo "hdui" and wrong casing "SparK") — the extension could not load.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      // FIX: "spark.sql.parquetLegacyFormat" is not a Spark conf key; the correct
      // key for legacy parquet output is "spark.sql.parquet.writeLegacyFormat".
      .config("spark.sql.parquet.writeLegacyFormat", "true")
      .enableHiveSupport()
      .getOrCreate()

    val hdfs = "hdfs://192.168.40.110:9000/user/hive/warehouse/dwd_ds_hudi.db/fact_order_detail"

    // Restrict to 2020 orders up front; everything below ranks within this view.
    spark.read.format("hudi").load(hdfs)
      .where(year(col("create_time")) === 2020)
      .createOrReplaceTempView("data")

    spark.sql("select * from data limit 1").show

    // todo: top 10 by sales QUANTITY (sum of sku_number per sku).
    // FIX: the original SQL was invalid (`) as` without an alias name) and ranked
    // with row_number() partitioned by sku_id — within its own partition every sku
    // is row 1, so nothing was actually ranked. Aggregate with GROUP BY, then rank
    // over the whole result set.
    val topQuantity = spark.sql(
      """
        |select
        |  sku_id,
        |  sku_name,
        |  total_count,
        |  row_number() over(order by total_count desc) as topquantity
        |from (
        |  select sku_id, sku_name, sum(sku_number) as total_count
        |  from data
        |  group by sku_id, sku_name
        |) t
        |order by total_count desc
        |limit 10
        |""".stripMargin)
    topQuantity.show()

    // todo: top 10 by sales AMOUNT (sum of sku_number * unit price per sku).
    // NOTE(review): the unit-price column is assumed to be `price` per the usual
    // fact_order_detail schema — confirm against the actual Hudi table columns.
    val topAmount = spark.sql(
      """
        |select
        |  sku_id,
        |  sku_name,
        |  total_amount,
        |  row_number() over(order by total_amount desc) as topprice
        |from (
        |  select sku_id, sku_name, sum(sku_number * price) as total_amount
        |  from data
        |  group by sku_id, sku_name
        |) t
        |order by total_amount desc
        |limit 10
        |""".stripMargin)
    topAmount.show()

    // TODO: join the two rankings on their row number and write the combined result
    // to ClickHouse shtd_result.topten via JDBC (the ClickHouse driver dependency and
    // connection settings are not visible in this file).

    spark.close()
  }

}
