package ods_industry_2024.gy_10.indicator

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession


object indicator02 {
  /**
   * Task 2: from hudi_gy_dwd10.fact_machine_data, compute for each machine and
   * each day the total duration (seconds) spent in the "运行" (running) state.
   *
   * Rule from the assignment: a running record has no explicit end time, so the
   * timestamp of the machine's *next* status record is taken as its end time.
   * If a running record has no following record at all, its duration counts as 0.
   * The result is written to hudi_gy_dws10.machine_data_total_time, then queried
   * via spark-sql CLI ordered by machine_id desc, machine_record_date asc (top 5).
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第二题")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // FIX: key was misspelled as "spark.sq.extensions", so the Hudi SQL
      // extension was silently never registered.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
//      .config("spark.sql.legacy.avro.datetimeRebaseModeInWrite","LEGACY")
//      .config("spark.sql.legacy.parquet.datetimeRebaseModeInRead","LEGACY")
      .enableHiveSupport()
      .getOrCreate()

//    val hdfs_path="hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd10.db/fact_machine_data"
//    spark.read.format("hudi").load(hdfs_path).limit(5).show

    // Expose the DWD fact table to Spark SQL under a short view name.
    spark.table("hudi_gy_dwd10.fact_machine_data")
      .createOrReplaceTempView("machine_data")

    // Inner query r1: one distinct (machine, timestamp, state) row per record,
    // plus the record's calendar day.
    // Middle query r2: per record, duration in seconds until the machine's next
    // record (lead over machine_id ordered by time). The lead default is the
    // row's own timestamp, so the last record of a machine yields 0 — matching
    // the "no next state => count as 0" rule. Non-running states contribute 0.
    // Outer query: sum durations per machine per day. NOTE: the leading
    // `distinct` is redundant (GROUP BY already yields distinct groups) but is
    // kept to preserve the original query text.
    val result = spark.sql(
      """
        |select distinct
        |r2.machine_id,
        |r2.run_day as machine_record_date,
        |sum(r2.diff_time) as total_time
        |from(
        |select
        |r1.machine_id,
        |r1.run_day,
        |case
        |when r1.state="运行"
        |then unix_timestamp(lead(r1.run_time,1,r1.run_time) over(partition by r1.machine_id order by r1.run_time)) - unix_timestamp(r1.run_time)
        |else 0
        |end
        |as diff_time
        |from(
        |select distinct
        |d.machineid as machine_id,
        |date_format(d.machinerecorddate,"yyyy-MM-dd") as run_day,
        |d.machinerecorddate as run_time,
        |d.machinerecordstate as state
        |from machine_data as d
        |) as r1
        |) as r2
        |group by r2.machine_id,r2.run_day
        |""".stripMargin)

//    result.write.mode("append")
//      .format("hudi")
//      .options(getQuickstartWriteConfigs)
//      .option(RECORDKEY_FIELD.key(),"machine_id")
//      .option(PRECOMBINE_FIELD.key(),"total_time")
//      .option(PARTITIONPATH_FIELD.key(),"machine_record_date")
//      .option("hoodie.table.name","machine_data_total_time")
//      .save("hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dws10.db/machine_data_total_time")

    // Persist the aggregate into the DWS database.
    result.write.mode("overwrite")
      .saveAsTable("hudi_gy_dws10.machine_data_total_time")

    // FIX: the original message said "第一题完成" (task 1 done); this is task 2.
    println("第二题完成")

    spark.close()
  }

}
