package ds_industry_2025.industry.gy_09.T3

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/*
    2、编写scala代码，使用Spark根据dwd层的fact_produce_record表，基于全量历史增加设备生产一个产品的平均耗时字段
    （produce_per_avgtime），produce_code_end_time值为1900-01-01 00:00:00的数据为脏数据，需要剔除，并以
    produce_record_id和ProduceMachineID为联合主键进行去重（注：fact_produce_record表中，一条数据代表加工一个产品，
    produce_code_start_time字段为开始加工时间，produce_code_end_time字段为完成加工时间），将得到的数据提取下表所需字段然后
    写入dws层的表machine_produce_per_avgtime中，然后使用hive cli根据设备id降序排序查询前3条数据，将SQL语句复制粘贴至客户
    端桌面【Release\任务B提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务B提交结果.docx】中对
    应的任务序号下；
 */
object t2 {
  /**
   * Builds dws.machine_produce_per_avgtime from dwd.fact_produce_record:
   *  1. drop dirty rows whose producecodeendtime is the sentinel 1900-01-01 00:00:00;
   *  2. de-duplicate on the composite key (producerecordid, producemachineid);
   *  3. compute each record's run time and the per-machine average run time
   *     (field produce_per_avgtime, as required by the task spec above);
   *  4. overwrite the result into the dws Hive table.
   */
  def main(args: Array[String]): Unit = {
    // Hive-enabled session; nonstrict dynamic partitioning so Hive writes
    // don't require a static partition column. Kryo + Hudi extension kept
    // consistent with the other jobs in this project.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Filter out dirty rows and de-duplicate on the composite primary key.
    // `=!=` is the supported Column inequality operator; `!==` has been
    // deprecated since Spark 2.0.
    spark.table("dwd.fact_produce_record")
      .where(col("producecodeendtime") =!= lit("1900-01-01 00:00:00").cast("timestamp"))
      .dropDuplicates(Seq("producerecordid", "producemachineid"))
      .createOrReplaceTempView("produce_record")

    // Inner query: per-record run time in seconds (end - start).
    // Outer query: window average per machine. The alias is
    // produce_per_avgtime to match the required dws schema (the original
    // code wrote produce_peravg_time, which mismatches the spec).
    val result = spark.sql(
      """
        |select distinct
        |  producerecordid  as produce_record_id,
        |  producemachineid as produce_machine_id,
        |  run_time         as producetime,
        |  avg(run_time) over (partition by producemachineid) as produce_per_avgtime
        |from (
        |  select
        |    producerecordid,
        |    producemachineid,
        |    (unix_timestamp(producecodeendtime) - unix_timestamp(producecodestarttime)) as run_time
        |  from produce_record
        |) as r1
        |""".stripMargin)

    // Ensure the target database exists, then overwrite the dws table.
    spark.sql("create database if not exists dws")
    result.write.format("hive").mode("overwrite")
      .saveAsTable("dws.machine_produce_per_avgtime")

    spark.close()
  }

}
