package ds_industry_2025.industry.gy_10.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

/*
      Task 3: Write Scala code that uses Spark to compute, from the hudi_gy_dws table
      machine_data_total_time, the three machines with the longest running time per day.
      Ties must all be output (e.g. with two machines tied for second place, the third
      rank is unchanged and four machines are reported in total). Write the result into
      the machine_data_total_time_top3 table of the shtd_industry database in ClickHouse
      (schema below), then query all rows from the ClickHouse command line on Linux and
      paste the SQL statement and a screenshot of its output under the matching task
      number in the client-desktop document [Release\任务B提交结果.docx].
   machine_data_total_time_top3:
 */
object t6 {
  /**
   * Computes, per day, the top-3 machines by total running time (dense-ranked so
   * ties share a rank and are all reported) and writes the result into the
   * ClickHouse table shtd_industry.machine_data_total_time_top3.
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session with Hudi extensions and Hive metastore support.
    // LEGACY rebase mode is required to read parquet files written with the
    // pre-Spark-3 hybrid calendar.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t6") // keep the app name consistent with the object name
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .config("spark.sql.legacy.parquet.datetimeRebaseModeInRead", "LEGACY")
      .enableHiveSupport()
      .getOrCreate()

    spark.table("hudi_gy_dws.machine_data_total_time")
      .createOrReplaceTempView("data")

    // Inner query: deduplicate source rows, then dense_rank per day by running
    // time (descending) so ties share a rank and the next rank is not skipped.
    // Outer query: pivot ranks 1..3 into columns. collect_list skips NULLs, so
    // each CASE WHEN gathers exactly the machine ids holding that rank; max()
    // picks the (tie-shared, hence unique) running time for each rank.
    // NOTE: the original outer SELECT DISTINCT was redundant — GROUP BY already
    // yields one row per machine_record_date — and has been removed.
    val result = spark.sql(
      """
        |select
        |machine_record_date,
        |collect_list(case when row=1 then machine_id end) as first_id,
        |collect_list(case when row=2 then machine_id end) as second_id,
        |collect_list(case when row=3 then machine_id end) as tertiary_id,
        |max(case when row=1 then total_amount end) as first_time,
        |max(case when row=2 then total_amount end) as second_time,
        |max(case when row=3 then total_amount end) as tertiary_time
        |from(
        |select distinct
        |machine_record_date,
        |machine_id,
        |total_amount,
        |dense_rank() over(partition by machine_record_date order by total_amount desc ) as row
        |from data
        |) as r1
        |where row < 4
        |group by machine_record_date
        |""".stripMargin)
      // Flatten the id arrays into comma-separated strings to match the
      // string-typed columns of the ClickHouse target table.
      .withColumn("first_id", concat_ws(",", col("first_id")))
      .withColumn("second_id", concat_ws(",", col("second_id")))
      .withColumn("tertiary_id", concat_ws(",", col("tertiary_id")))

    result.show

    // Persist into ClickHouse as the task requires — this step was missing.
    // TODO(review): replace host/user/password with the values of the target
    // environment before running.
    result.write
      .format("jdbc")
      .mode("append")
      .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
      .option("url", "jdbc:clickhouse://localhost:8123/shtd_industry")
      .option("dbtable", "machine_data_total_time_top3")
      .option("user", "default")
      .option("password", "")
      .save()

    spark.close()
  }

}
