package industry_2024.industry_09.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object indicator02 {

  /**
   * Indicator 2: per-machine production time vs. per-machine average.
   *
   * Reads `dwd09.fact_produce_record`, computes each production record's
   * duration in seconds (`producetime`) together with the average duration
   * for its machine (`produce_per_avgtime`), and (re)builds the result
   * table `dws09.machine_produce_per_avgtime09`.
   *
   * Side effects: drops/creates a Hive table and appends rows to it.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第二题")
      // nonstrict: allow Hive dynamic-partition inserts without a static partition key
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Source rows: drop the "1900-01-01 00:00:00" sentinel used for unfinished
    // records, then de-duplicate on the business key before any aggregation.
    // `=!=` replaces `!==`, which is deprecated since Spark 2.0.
    spark.table("dwd09.fact_produce_record")
      .filter(col("producecodeendtime") =!= "1900-01-01 00:00:00")
      .dropDuplicates(Seq("producerecordid", "producemachineid"))
      .createOrReplaceTempView("data")

    // Each record's duration in seconds, joined with the per-machine average
    // (computed in the subquery r1 over the same de-duplicated view).
    val result = spark.sql(
      """
        |select distinct
        |data.producerecordid as produce_record_id,
        |data.producemachineid as produce_machine_id,
        |unix_timestamp(data.producecodeendtime) - unix_timestamp(data.producecodestarttime) as producetime,
        |r1.produce_per_avgtime
        |from data
        |left join (
        |select
        |producemachineid as produce_machine_id,
        |avg(unix_timestamp(producecodeendtime) - unix_timestamp(producecodestarttime)) as produce_per_avgtime
        |from data
        |group by producemachineid
        |) as r1
        |on r1.produce_machine_id=data.producemachineid
        |""".stripMargin)

    // Rebuild the target table from scratch on every run.
    spark.sql("use dws09")
    spark.sql("drop table if exists machine_produce_per_avgtime09")
    spark.sql(
      """
        |create table if not exists machine_produce_per_avgtime09(
        |produce_record_id  int,
        |produce_machine_id int,
        |producetime bigint,
        |produce_per_avgtime double
        |)
        |""".stripMargin)
    // `producetime` is declared bigint: unix_timestamp() returns BIGINT in
    // Spark SQL, so the subtraction above is BIGINT too. Declaring it `int`
    // would make the append below clash with the pre-created Hive schema.

    result.write.mode("append")
      .format("hive")
      .saveAsTable("dws09.machine_produce_per_avgtime09")

    // Releases the underlying SparkContext (equivalent to stop()).
    spark.close()
  }

}
