package ds_industry_2025.industry.gy_05.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/*
      3、编写Scala代码，使用Spark根据dwd层的fact_change_record表和dim_machine表统计，计算每个车间设备的月平均运行时长与所有
      设备的月平均运行时长对比结果（即设备状态为“运行”，结果值为：高/低/相同），月份取值使用状态开始时间的月份，若某设备的运行状态
      当前未结束（即change_end_time值为空）则该状态不参与计算，计算结果存入MySQL数据库shtd_industry
      的machine_running_compare表中（表结构如下），然后在Linux的MySQL命令行中根据车间号降序排序，查询出前2条，将SQL语句复
      制粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交
      结果.docx】中对应的任务序号下;
 */
/**
 * Compares, per month (taken from the state's start time), each factory's
 * average "running" duration against the company-wide average, labelling
 * each factory/month as 高 (higher), 低 (lower) or 相同 (equal).
 *
 * Only records in the "运行" (running) state with a non-null end time
 * participate, per the task statement above.
 */
object t5 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t5")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    val fact_hdfs = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd.db/fact_change_record"
    val machine_hdfs = "hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd.db/dim_machine"

    // Fact table: keep only finished "running" state records.
    // BUG FIX: the original omitted the state filter, so durations of every
    // state (standby, maintenance, ...) were averaged in, not just "运行".
    spark.read.format("hudi").load(fact_hdfs)
      .where(col("changerecordstate") === "运行")
      .where(col("changeendtime").isNotNull)
      .createOrReplaceTempView("f")

    spark.read.format("hudi").load(machine_hdfs)
      .createOrReplaceTempView("m")

    // r1: per-record running duration in seconds, with the month of the
    //     state's start time.
    // r2: window averages — per factory/month and company-wide per month.
    //     DISTINCT collapses the per-record rows to one row per
    //     (factory, year, month).
    // outer query: label each factory average against the company average.
    // lpad zero-pads the month so start_month reads "2021-03", not "2021-3".
    val result = spark.sql(
      """
        |select
        |concat_ws("-",year,lpad(month,2,'0')) as start_month,
        |machine_factory,
        |case
        |when factory_avg > company_avg then "高"
        |when factory_avg < company_avg then "低"
        |else "相同"
        |end as comparison,
        |factory_avg,
        |company_avg
        |from(
        |select distinct
        |machine_factory,
        |avg(run_time) over(partition by year,month) as company_avg,
        |avg(run_time) over(partition by machine_factory,year,month) as factory_avg,
        |year,month
        |from(
        |select
        |m.machinefactory as machine_factory,
        |(unix_timestamp(f.changeendtime) - unix_timestamp(f.changestarttime)) as run_time,
        |year(f.changestarttime) as year,
        |month(f.changestarttime) as month
        |from f
        |join m on m.basemachineid=f.changemachineid
        |) as r1
        |) as r2
        |""".stripMargin)

    result.show

    // TODO(review): the task requires persisting `result` into MySQL table
    // shtd_industry.machine_running_compare via result.write.format("jdbc")
    // (url/user/password per the environment) — currently it is only shown.

    spark.stop()
  }

}
