package industry_2024.industry_10.indicator

import org.apache.spark.sql.SparkSession

object indicator03 {
  /**
   * Indicator task 3.
   *
   * Reads the Hudi dws-layer table `machine_data_total_time`, computes for each
   * day the three machines with the longest total running time (ranked with
   * `rank()`, so tied machines share a rank and the next rank is skipped, e.g.
   * two machines tied for 2nd leave no 3rd), pivots the ranking into one row
   * per day and appends the result to the ClickHouse table
   * `machine_data_total_time_top3_10` in database `shtd_industry`.
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session with Hive support and the Hudi SQL extension enabled.
    val session = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第三题")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .enableHiveSupport()
      .getOrCreate()

    // HDFS location of the Hudi source table in the dws layer.
    val sourcePath = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dws10.db/machine_data_total_time"

    // Load the source table, drop exact duplicate rows and expose it to SQL.
    val sourceFrame = session.read.format("hudi").load(sourcePath).dropDuplicates()
    sourceFrame.createOrReplaceTempView("data")

    // Rank machines per day by total running time, keep ranks 1-3 and pivot
    // them into first/second/tertiary id and time columns (one row per day).
    val topThreePerDay = session.sql(
      """
        |select
        |r1.machine_record_date as date_day,
        |max(case when row=1 then r1.machine_id end) as first_id,
        |max(case when row=2 then r1.machine_id end) as second_id,
        |max(case when row=3 then r1.machine_id end) as tertiary_id,
        |max(case when row=1 then r1.total_time end) as first_time,
        |max(case when row=2 then r1.total_time end) as second_time,
        |max(case when row=3 then r1.total_time end) as  tertiary_time
        |from(
        |select
        |d.machine_record_date,
        |d.machine_id,
        |d.total_time,
        |rank() over(partition by d.machine_record_date order by d.total_time desc ) as row
        |from data as d
        |) as r1
        |where row <=3
        |group by r1.machine_record_date
        |""".stripMargin)

    // Append the daily top-3 rows to ClickHouse over JDBC.
    val clickhouseOptions = Map(
      "url" -> "jdbc:clickhouse://192.168.40.110:8123/shtd_industry",
      "driver" -> "com.clickhouse.jdbc.ClickHouseDriver",
      "user" -> "default",
      "password" -> "",
      "dbtable" -> "machine_data_total_time_top3_10"
    )
    topThreePerDay.write
      .format("jdbc")
      .options(clickhouseOptions)
      .mode("append")
      .save()

    println("写入完成")

    session.close()
  }

}
