package industry_2024.industry_04.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object indicator03 {
  /*
   * Task (from the assignment spec): using Spark, join the dwd-layer
   * fact_change_record table with dim_machine and, for every factory
   * (machinefactory), find the machine(s) whose TOTAL running time
   * (records in state "运行") is the median of that factory. When the
   * factory has an even number of machines, BOTH middle rows are kept
   * and output as-is. Records whose change_end_time is null (the running
   * state has not finished yet) must not participate in the calculation.
   * The result is appended to ClickHouse table
   * shtd_industry.machine_running_median04.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第三题")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Running-state change records only; open records (null end time) are
    // excluded up front so they never contribute to any machine's total.
    val factPath = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd04.db/fact_change_record"
    spark.read.format("hudi").load(factPath)
      .where(col("changerecordstate") === "运行")
      .filter(col("changeendtime").isNotNull)
      .createOrReplaceTempView("fact_change_record")

    val dimPath = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd04.db/dim_machine"
    spark.read.format("hudi").load(dimPath)
      .createOrReplaceTempView("dim_machine")

    // Three steps, expressed as CTEs:
    //   totals — sum the running duration in seconds for each machine
    //            (the original query skipped this aggregation and ran
    //            percentile_approx over raw change records instead);
    //   ranked — order machines inside each factory by total duration and
    //            attach the factory's machine count;
    //   final  — keep only the median row(s). For an odd count of n rows
    //            both predicates select rn = (n+1)/2; for an even count
    //            they select rn = n/2 and rn = n/2 + 1, i.e. the two
    //            middle rows are preserved verbatim as required.
    val result = spark.sql(
      """
        |with totals as (
        |  select f.changemachineid as machine_id,
        |         d.machinefactory  as machine_factory,
        |         sum(unix_timestamp(f.changeendtime)
        |             - unix_timestamp(f.changestarttime)) as total_running_time
        |  from fact_change_record f
        |  join dim_machine d
        |    on d.basemachineid = f.changemachineid
        |  group by f.changemachineid, d.machinefactory
        |),
        |ranked as (
        |  select machine_id, machine_factory, total_running_time,
        |         row_number() over (partition by machine_factory
        |                            order by total_running_time) as rn,
        |         count(*)    over (partition by machine_factory) as cnt
        |  from totals
        |)
        |select machine_id, machine_factory, total_running_time
        |from ranked
        |where rn = floor((cnt + 1) / 2)
        |   or rn = floor(cnt / 2) + 1
        |""".stripMargin)

    // Append into ClickHouse over the JDBC data source; the target table
    // already exists with the required schema, hence mode("append").
    result.write.mode("append")
      .format("jdbc")
      .option("url", "jdbc:clickhouse://192.168.40.110:8123/shtd_industry")
      .option("user", "default")
      .option("password", "")
      .option("dbtable", "machine_running_median04")
      .save()

    spark.close()
  }

}
