package ds_industry_2025.industry.gy_10.T3

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
/*
    2、编写scala代码，使用Spark根据hudi_gy_dwd层的fact_machine_data表统计出每日每台设备，状态为“运行”的时长
    （若运行无结束时间，则需根据时间判断这个设备的运行状态的下一个状态是哪条数据，将下一个状态的数据的时间置为这个设备运行状态的结
    束时间,如果设备数据的运行状态不存在下一个状态，则该设备这个阶段数据的运行状态不参与计算，即该设备的这个阶段数据的运行状态时长
    按0计算），将结果数据写入hudi_gy_dws层的表machine_data_total_time中，然后使用spark-sql的cli根据machine_id降序
    和machine_record_date升序排序查询前5条数据，将SQL语句复制粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序
    号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对应的任务序号下；
 */
object t2 {

  /**
   * Per-machine, per-day "运行" (running) duration job.
   *
   * Reads the Hudi table fact_machine_data (hudi_gy_dwd layer), and for every
   * record whose state is "运行" computes its duration as the gap to the next
   * record of the same machine on the same day. If a running record has no
   * following record (lead returns null), its duration counts as 0, per the
   * task spec. Daily totals per machine are written to the Hudi table
   * machine_data_total_time in the hudi_gy_dws layer, partitioned by
   * machine_record_date.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session with Hive support and the Hudi SQL extension enabled.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      // Kryo serializer is required by Hudi's Spark integration.
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Source fact table location in the hudi_gy_dwd layer on HDFS.
    val fact_machine_data="hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd.db/fact_machine_data"

    spark.read.format("hudi").load(fact_machine_data)
      .createOrReplaceTempView("data")

    // Inner query r1: deduplicate raw rows and derive, per (machine, day),
    //   next_time = timestamp of the next record for the same machine on the
    //   same day (lead), which serves as the end time of a running interval.
    // Middle query r2: a "运行" row with a non-null next_time contributes
    //   (next_time - machine_record_date) seconds; otherwise 0 — this covers
    //   the "no next state => count as 0" rule from the spec.
    // Outer query: sum run_time per machine per day. The sum of BIGINT
    //   unix_timestamp differences is cast to INT so the written frame matches
    //   the `total_time int` column declared on the target table below.
    val result = spark.sql(
      """
        |select distinct
        |machine_id,
        |day as machine_record_date,
        |cast(sum(run_time) over(partition by machine_id,day) as int) as total_time
        |from(
        |select
        |machine_id,
        |case
        |when machine_record_state="运行" and next_time is not null then
        |unix_timestamp(next_time) - unix_timestamp(machine_record_date)
        |else 0
        |end as run_time,
        |day
        |from(
        |select distinct
        |machineid as machine_id,
        |machinerecordstate as machine_record_state,
        |machinerecorddate as machine_record_date,
        |lead(machinerecorddate,1) over(partition by machineid,to_date(machinerecorddate) order by machinerecorddate) as next_time,
        |to_date(machinerecorddate) as day
        |from data
        |) as r1
        |) as r2
        |""".stripMargin)

    spark.sql("create database if not exists hudi_gy_dws")
    spark.sql("use hudi_gy_dws")

    // Create the target Hudi table (copy-on-write) if it does not exist.
    // NOTE: the property key is hive_sync (was misspelled "hive_aync", which
    // Hudi would silently ignore, so HMS-based Hive sync never took effect).
    spark.sql(
      """
        |create table if not exists machine_data_total_time(
        |machine_id int,
        |machine_record_date String,
        |total_time int
        |)using hudi
        |tblproperties(
        |type="cow",
        |primaryKey="machine_id",
        |preCombineField="total_time",
        |hoodie.datasource.hive_sync.mode="hms"
        |)partitioned by(machine_record_date)
        |""".stripMargin)

    // Upsert the daily totals into the Hudi table; key/precombine/partition
    // options mirror the table properties declared above.
    result.write.format("hudi").mode("append")
      .options(getQuickstartWriteConfigs)
      .option(RECORDKEY_FIELD.key(),"machine_id")
      .option(PRECOMBINE_FIELD.key(),"total_time")
      .option(PARTITIONPATH_FIELD.key(),"machine_record_date")
      .option("hoodie.table.name","machine_data_total_time")
      .save("hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dws.db/machine_data_total_time")

    // Verification query to run in the spark-sql CLI (per the task statement):
    //   select * from machine_data_total_time
    //   order by machine_id desc, machine_record_date asc limit 5;

    spark.close()
  }

}
