package ds_industry_2025.industry.gy_10.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, concat_ws}
/*
    3、编写scala代码，使用Spark根据hudi_gy_dws层表machine_data_total_time，计算每日运行时长前三的设备（若存在运行时长相同的
    数据时应全部输出，例如有两条并列第二，则第三名次不变，总共输出四条数据）。将计算结果写入ClickHouse数据库shtd_industry
    的machine_data_total_time_top3表中（表结构如下），然后在Linux的ClickHouse命令行中查询所有数据，将SQL语句复制粘贴至
    客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中
    对应的任务序号下；
   machine_data_total_time_top3：
 */
/**
 * Reads the Hudi DWS table `machine_data_total_time`, computes the top-3
 * machines per day by running time (ties share a rank; with dense_rank the
 * following rank number is unchanged, so two machines tied for 2nd still
 * leave a 3rd and at least four rows feed the pivot), and appends the
 * pivoted result to ClickHouse table shtd_industry.machine_data_total_time_top3.
 */
object t3 {
  def main(args: Array[String]): Unit = {
    // Spark session with Hive support and the Hudi extension; Kryo
    // serialization is required by Hudi.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t3")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Hudi DWS table holding per-machine daily running time.
    val path = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dws.db/machine_data_total_time"

    // Load the Hudi table and drop rows with zero running time.
    // `=!=` is the supported inequality operator on Column; `!==` has been
    // deprecated since Spark 2.0.
    spark.read.format("hudi").load(path)
      .where(col("total_time") =!= 0)
      .createOrReplaceTempView("data")

    // Rank machines per day by total_time, keep ranks 1..3, then pivot the
    // ranks into one row per day.
    //  - The inner `distinct` de-duplicates possible multi-version Hudi rows
    //    before ranking.
    //  - collect_list ignores the NULLs produced by non-matching CASE arms,
    //    so each *_id column holds exactly the machines at that rank.
    //  - max() is used for the *_time columns because `row` is neither in the
    //    GROUP BY nor aggregated; all values at one rank are equal by
    //    construction, so max() just picks that value.
    // BUG FIX: the ranking and the time columns previously referenced
    // `total_amount`, which does not exist in machine_data_total_time — the
    // filter above and the output column names all use `total_time`.
    // (The former outer `distinct` was redundant with GROUP BY and removed.)
    val result = spark.sql(
        """
          |select
          |machine_record_date,
          |collect_list(case when row = 1 then machine_id end) as first_id,
          |collect_list(case when row = 2 then machine_id end) as second_id,
          |collect_list(case when row = 3 then machine_id end) as tertiary_id,
          |max(case when row = 1 then total_time end) as first_time,
          |max(case when row = 2 then total_time end) as second_time,
          |max(case when row = 3 then total_time end) as tertiary_time
          |from (
          |  select distinct
          |  machine_record_date,
          |  machine_id,
          |  total_time,
          |  dense_rank() over (partition by machine_record_date order by total_time desc) as row
          |  from data
          |) as r1
          |where row < 4
          |group by machine_record_date
          |""".stripMargin)
      // Machines tied at the same rank are emitted comma-separated in one cell.
      .withColumn("first_id", concat_ws(",", col("first_id")))
      .withColumn("second_id", concat_ws(",", col("second_id")))
      .withColumn("tertiary_id", concat_ws(",", col("tertiary_id")))

    // Append the per-day top-3 result into ClickHouse over JDBC.
    // NOTE(review): the target table schema is not shown in the task text;
    // the output column names are assumed to match
    // machine_data_total_time_top3 (in particular whether the date column is
    // `machine_record_date` or `date_day`) — confirm against the DDL.
    result.write.format("jdbc")
      .option("url", "jdbc:clickhouse://192.168.40.110:8123/shtd_industry")
      .option("user", "default")
      .option("password", "")
      .option("driver", "com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable", "machine_data_total_time_top3")
      .mode("append")
      .save()

    spark.close()
  }
}
