package ds_industry_2025.industry.gy_08.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
/*
    4、编写Scala代码，使用Spark根据dwd层的fact_change_record表关联dim_machine表统计每个车间中所有设备运行时长
    （即设备状态为“运行”）的中位数在哪个设备（为偶数时，两条数据原样保留输出），若某个设备运行状态当前未结束
    （即change_end_time值为空）则该状态不参与计算，计算结果存入MySQL数据库shtd_industry的machine_running_median表中
    （表结构如下），然后在Linux的MySQL命令行中根据所属车间、设备id均为降序排序，查询出前5条数据，将SQL语句复制粘贴至客户端桌面【
    Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序
    号下；
 */
object t4 {
  /**
   * Task 4: for every workshop (machine_factory), find the device(s) whose total
   * running time is the median of the workshop. With an even number of devices
   * the task requires BOTH middle rows to be kept as-is. Open running states
   * (changeendtime IS NULL) are excluded. The result is appended to the MySQL
   * table shtd_industry.machine_running_median.
   */
  def main(args: Array[String]): Unit = {
    // Spark session with Hive support; dynamic-partition / Kryo / Hudi settings
    // kept from the original job configuration.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t4")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Only finished "运行" (running) intervals participate: rows whose
    // changeendtime is NULL are still-open states and must not be counted.
    spark.table("dwd.fact_change_record")
      .where(col("changerecordstate") === lit("运行"))
      .where(col("changeendtime").isNotNull)
      .createOrReplaceTempView("fact_change_record")

    spark.table("dwd.dim_machine")
      .createOrReplaceTempView("dim_machine")

    // Total running seconds PER DEVICE (a device may have many running
    // intervals — they are summed), tagged with the device's workshop.
    spark.sql(
      """
        |select
        |  d.machinefactory  as machine_factory,
        |  r.changemachineid as machine_id,
        |  sum(unix_timestamp(r.changeendtime)
        |      - unix_timestamp(r.changestarttime)) as total_running_time
        |from fact_change_record r
        |join dim_machine d
        |  on d.basemachineid = r.changemachineid
        |group by d.machinefactory, r.changemachineid
        |""".stripMargin)
      .createOrReplaceTempView("run_time_per_machine")

    // Exact median per workshop. Rank devices by running time inside each
    // workshop; for n devices keep rank floor((n+1)/2) and floor((n+2)/2):
    // odd n -> the single middle row, even n -> both middle rows, exactly as
    // the task requires ("为偶数时，两条数据原样保留输出").
    val result = spark.sql(
      """
        |select machine_id, machine_factory, total_running_time
        |from (
        |  select machine_factory,
        |         machine_id,
        |         total_running_time,
        |         row_number() over (partition by machine_factory
        |                            order by total_running_time) as rn,
        |         count(*) over (partition by machine_factory)    as cnt
        |  from run_time_per_machine
        |) ranked
        |where rn = floor((cnt + 1) / 2)
        |   or rn = floor((cnt + 2) / 2)
        |""".stripMargin)

    result.show()

    // Persist to MySQL as the task requires.
    // NOTE(review): url / user / password are typical exam-environment values —
    // confirm against the actual cluster before running.
    result.write
      .format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/shtd_industry?useSSL=false&characterEncoding=utf8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "123456")
      .option("dbtable", "machine_running_median")
      .mode("append")
      .save()

    spark.close()
  }

}
