package ds_industry_2025.industry.gy_05.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, expr}

import java.util.Properties
/*
    2、编写Scala代码，使用Spark根据dwd层的fact_change_record表关联dim_machine表统计每个车间中所有设备运行时长（即设备状态
    为“运行”）的中位数在哪个设备（为偶数时，两条数据原样保留输出），若某个设备运行状态当前未结束（即change_end_time值为空）则该
    状态不参与计算，计算结果存入MySQL数据库shtd_industry的machine_running_median表中（表结构如下），然后在Linux的MySQL命
    令行中根据所属车间、设备id均为降序排序，查询出前5条数据，将SQL语句复制粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的
    任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下;
 */
object t2 {
  /**
   * Task 2: for every workshop (machine_factory), find which device(s) hold the
   * median running duration, based on dwd.fact_change_record joined to
   * dwd.dim_machine, and write the result to MySQL
   * shtd_industry.machine_running_median.
   *
   * Rules from the task statement:
   *  - only records whose state is "运行" (running) participate;
   *  - records whose status has not ended (change_end_time IS NULL) are excluded;
   *  - when a workshop has an EVEN number of durations, BOTH middle rows are
   *    kept in the output unchanged. This is why percentile_approx cannot be
   *    used here: it yields a single (approximate) value, so an equality
   *    join-back would keep at most one of the two middle rows.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Keep only finished "running" records: a state that has not ended
    // (changeendtime IS NULL) must not participate in the calculation.
    spark.table("dwd.fact_change_record")
      .where(col("changerecordstate") === "运行")
      .where(col("changeendtime").isNotNull)
      .createOrReplaceTempView("change_record")

    spark.table("dwd.dim_machine")
      .createOrReplaceTempView("dim_machine")

    // Step 1: running duration in seconds of every finished "running" record,
    // joined to the workshop that owns the machine (end - start, not start - end).
    spark.sql(
      """
        |select
        |d.machinefactory as machine_factory,
        |c.changemachineid as machine_id,
        |(unix_timestamp(c.changeendtime) - unix_timestamp(c.changestarttime)) as run_time
        |from change_record as c
        |join dim_machine as d
        |on d.basemachineid = c.changemachineid
        |""".stripMargin)
      .createOrReplaceTempView("run_times")

    // Step 2: exact per-workshop median via window functions.
    //   odd count  -> the single middle row:  rn = (cnt + 1) div 2
    //   even count -> both middle rows:       rn in (cnt div 2, cnt div 2 + 1)
    // `div` is Spark SQL integer division, so the comparison with the integer
    // row_number is exact.
    val result = spark.sql(
      """
        |select machine_id, machine_factory, run_time
        |from (
        |  select machine_id, machine_factory, run_time,
        |         row_number() over (partition by machine_factory order by run_time) as rn,
        |         count(*)     over (partition by machine_factory) as cnt
        |  from run_times
        |) t
        |where (cnt % 2 = 1 and rn = (cnt + 1) div 2)
        |   or (cnt % 2 = 0 and rn in (cnt div 2, cnt div 2 + 1))
        |""".stripMargin)

    // JDBC connection properties for the target MySQL instance.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    // NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5.x class
    // name; with Connector/J 8.x use com.mysql.cj.jdbc.Driver — confirm which
    // connector jar is on the classpath.
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    // overwrite: re-running the job replaces the previous result set.
    result.write.mode("overwrite")
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false","machine_running_median",conn)

    println("写入完成")

    spark.close()
  }

}
