package industry_2024.industry_04.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object indicator04 {
  /**
   * Indicator task 4.
   *
   * Reads the full history of `fact_produce_record` from the dwd_ds_hudi layer
   * (one row = one produced item; `produce_code_start_time` is the processing
   * start time, `produce_code_end_time` the completion time), drops dirty rows
   * whose end time is the sentinel `1900-01-01 00:00:00`, computes each
   * machine's average processing time per item, keeps only the records whose
   * processing time exceeds their machine's average, and writes the result to
   * the ClickHouse table `shtd_industry.machine_produce_per_avgtime`.
   */
  def main(args: Array[String]): Unit = {
    val spark=SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第四题")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .config("spark.sql.legacy.parquet.datetimeRebaseModeInRead","LEGACY")
      .enableHiveSupport()
      .getOrCreate()

    // Load the Hudi fact table, discard the sentinel-dated dirty rows and
    // exact duplicates, then expose it to Spark SQL as view `fact`.
    // `=!=` replaces the deprecated `!==` Column operator (deprecated since Spark 2.0).
    val hdfs_path="hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_dwd04.db/fact_produce_record"
    spark.read.format("hudi").load(hdfs_path)
      .where(col("producecodeendtime") =!= "1900-01-01 00:00:00")
      .dropDuplicates()
      .createOrReplaceTempView("fact")

    // Single scan: compute each record's processing time together with the
    // per-machine average via a window function, then keep only the records
    // slower than their machine's average. This avoids the previous
    // self-join of the fact table against an un-deduplicated windowed CTE,
    // which produced a quadratic blow-up per machine before `distinct`.
    val result=spark.sql(
      """
        |select distinct
        |produce_record_id,
        |produce_machine_id,
        |producetime,
        |produce_per_avgtime
        |from (
        |select
        |f.producerecordid as produce_record_id,
        |f.producemachineid as produce_machine_id,
        |unix_timestamp(f.producecodeendtime) - unix_timestamp(f.producecodestarttime) as producetime,
        |avg(unix_timestamp(f.producecodeendtime) - unix_timestamp(f.producecodestarttime))
        |over(partition by f.producemachineid) as produce_per_avgtime
        |from fact as f
        |) t
        |where producetime > produce_per_avgtime
        |""".stripMargin)

    // Append the filtered records into ClickHouse via JDBC.
    result.write.mode("append")
      .format("jdbc")
      .option("url","jdbc:clickhouse://192.168.40.110:8123/shtd_industry")
      .option("user","default")
      .option("password","")
      .option("dbtable","machine_produce_per_avgtime04")
      .save()

    println("写入完成")

    // stop() is the idiomatic Spark shutdown (close() merely delegates to it).
    spark.stop()
  }

}
