package ods_industry_2024.gy_10.indicator

import org.apache.spark.sql.SparkSession

object indicator03 {

  /**
   * Indicator #3: for each day in `hudi_gy_dws10.machine_data_total_time`,
   * pivot the top-3 machines by total running time into one row
   * (first/second/tertiary id + time) and append the result to the
   * ClickHouse table `shtd_result.machine_data_total_time_top3`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第三题")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .config("spark.sql.legacy.avro.datetimeRebaseModeInWrite", "LEGACY")
      // FIX: the original key "spark.sql.legacy.parquet.datetimeRebaseModeRead"
      // does not exist (it is missing "In"), so the LEGACY rebase mode was
      // silently ignored when reading old parquet dates.
      .config("spark.sql.legacy.parquet.datetimeRebaseModeInRead", "LEGACY")
      .enableHiveSupport()
      .getOrCreate()

    spark.table("hudi_gy_dws10.machine_data_total_time")
      .createOrReplaceTempView("data")

    // Rank machines per day by total_time (descending) with dense_rank, keep
    // ranks 1..3, then pivot to one row per day. The max() around each CASE is
    // required because every non-grouped column in Spark SQL must appear in
    // GROUP BY or inside an aggregate; max() over a single non-null value per
    // (day, rank) simply selects it. NOTE: dense_rank can tie, in which case
    // max() picks one arbitrary machine among the tied ones.
    val result = spark.sql(
      """
        |select
        |  r.machine_record_date                              as date_day,
        |  max(case when r.rk = 1 then r.machine_id  end)     as first_id,
        |  max(case when r.rk = 2 then r.machine_id  end)     as second_id,
        |  max(case when r.rk = 3 then r.machine_id  end)     as tertiary_id,
        |  max(case when r.rk = 1 then r.total_time  end)     as first_time,
        |  max(case when r.rk = 2 then r.total_time  end)     as second_time,
        |  max(case when r.rk = 3 then r.total_time  end)     as tertiary_time
        |from (
        |  select
        |    d.machine_record_date,
        |    d.machine_id,
        |    d.total_time,
        |    dense_rank() over(partition by d.machine_record_date
        |                      order by d.total_time desc) as rk
        |  from data as d
        |) as r
        |where r.rk <= 3
        |group by r.machine_record_date
        |""".stripMargin)

    // Alternative formulation without GROUP BY, shown for comparison: the same
    // pivot via windowed max() per day plus DISTINCT to collapse duplicates.
    spark.sql(
      """
        |select distinct
        |  r.machine_record_date,
        |  max(case when r.rk = 1 then r.machine_id end) over(partition by r.machine_record_date) as first_id,
        |  max(case when r.rk = 2 then r.machine_id end) over(partition by r.machine_record_date) as second_id,
        |  max(case when r.rk = 3 then r.machine_id end) over(partition by r.machine_record_date) as tertiary_id,
        |  max(case when r.rk = 1 then r.total_time end) over(partition by r.machine_record_date) as first_time,
        |  max(case when r.rk = 2 then r.total_time end) over(partition by r.machine_record_date) as second_time,
        |  max(case when r.rk = 3 then r.total_time end) over(partition by r.machine_record_date) as tertiary_time
        |from (
        |  select
        |    d.machine_record_date,
        |    d.machine_id,
        |    d.total_time,
        |    dense_rank() over(partition by d.machine_record_date
        |                      order by d.total_time desc) as rk
        |  from data as d
        |) as r
        |where r.rk <= 3
        |""".stripMargin).show()

    // Append the pivoted top-3 table to ClickHouse via JDBC.
    result.write
      .format("jdbc")
      .option("url", "jdbc:clickhouse://192.168.40.110:8123/shtd_result")
      .option("user", "default")
      .option("password", "")
      .option("driver", "com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable", "machine_data_total_time_top3")
      .mode("append")
      .save()

    println("完成")
    spark.close()
  }

}
