package ds_industry_2025.industry.gy_04.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
/*
    3、编写Scala代码，使用Spark根据dwd_ds_hudi层的fact_change_record表关联dim_machine表统计每个车间中所有设备运行时长
    （即设备状态为“运行”）的中位数在哪个设备（为偶数时，两条数据原样保留输出），若某个设备运行状态当前未结束（即change_end_time值
    为空）则该状态不参与计算，计算结果存入ClickHouse数据库shtd_industry的machine_running_median表中（表结构如下），然后
    在Linux的ClickHouse命令行中根据所属车间、设备id均为降序排序，查询出前5条数据，将SQL语句复制粘贴至客户端桌面【Release\任
    务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下;
 */
object t6 {
  /**
   * Task 3: for every factory (workshop), find the device(s) whose running
   * duration (records in the "运行" state) is the median of all running
   * durations in that factory.
   *
   * Rules from the task statement:
   *   - records with a NULL change_end_time (state still open) are excluded;
   *   - when a factory has an even number of records, BOTH middle rows are
   *     kept and output as-is.
   *
   * The result goes to ClickHouse table shtd_industry.machine_running_median
   * (columns: machine_factory, machine_id, total_running_time).
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session with Hudi extensions and Hive support.
    // NOTE(review): appName "t1" does not match this file (task 3) — kept
    // as-is since it is only a label, but worth renaming.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t1")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    val factChangeRecordPath = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd.db/fact_change_record"
    val dimMachinePath       = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd.db/dim_machine"

    // Keep only FINISHED running records: an open state (changeendtime IS NULL)
    // must not participate in the calculation.
    spark.read.format("hudi").load(factChangeRecordPath)
      .where(col("changeendtime").isNotNull)
      .where(col("changerecordstate") === "运行")
      .createOrReplaceTempView("c")

    spark.read.format("hudi").load(dimMachinePath)
      .createOrReplaceTempView("d")

    // Running duration (in seconds) of every record, joined to its factory.
    val r1 = spark.sql(
      """
        |select
        |c.changemachineid as machine_id,
        |d.machinefactory as machine_factory,
        |(unix_timestamp(c.changeendtime) - unix_timestamp(c.changestarttime)) as run_time
        |from c
        |join d on d.basemachineid=c.changemachineid
        |""".stripMargin)

    // Exact median selection per factory via row_number over run_time:
    //   odd  count n -> single middle row at position (n + 1) / 2
    //   even count n -> BOTH middle rows, positions n/2 and n/2 + 1
    // i.e. keep rows with  floor((n+1)/2) <= rn <= floor(n/2) + 1.
    //
    // percentile_approx() is deliberately NOT used: being approximate, its
    // result is not guaranteed to equal any actual run_time, so an equality
    // filter against it can silently drop the median row, and it returns a
    // single value so it cannot yield both middle rows for even counts.
    val byFactory = Window.partitionBy("machine_factory")
    val ordered   = byFactory.orderBy(col("run_time"))

    val result = r1
      .withColumn("rn",  row_number().over(ordered))
      .withColumn("cnt", count(lit(1)).over(byFactory))
      .filter(col("rn").between(floor((col("cnt") + 1) / 2), floor(col("cnt") / 2) + 1))
      .select(
        col("machine_factory"),
        col("machine_id"),
        col("run_time").as("total_running_time")
      )

    result.show()

    // Sink: ClickHouse shtd_industry.machine_running_median.
    // NOTE: the JDBC URL requires "//" after the scheme — the previous
    // "jdbc:clickhouse:host:port" form is malformed and fails to connect.
//    result.write.format("jdbc")
//      .option("url","jdbc:clickhouse://192.168.40.110:8123/shtd_industry")
//      .option("user","default")
//      .option("password","")
//      .option("driver","com.clickhouse.jdbc.ClickHouseDriver")
//      .option("dbtable","machine_running_median")
//      .save()

    spark.close()
  }

}
