package industry_2024.industry_09.indicator

import org.apache.spark.sql.SparkSession

import java.util.Properties

object indicator03 {

  /**
   * Indicator task 3: for each machine, find the highest and the second-highest
   * distinct `producetime` from `dws09.machine_produce_per_avgtime09`, then write
   * (machine_id, first_time, second_time) to the MySQL table
   * `machine_produce_timetop2_09` (full overwrite).
   *
   * Note on ranking: `dense_rank()` is used deliberately — if two rows tie for
   * first, the next distinct time still gets rank 2 (with `rank()` it would be 3).
   * Machines that have only one distinct producetime produce no rank-2 row and are
   * therefore dropped by the inner join, same as the original two-view version.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第三题")
      // nonstrict mode allows dynamic partition inserts without a static partition column
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
//      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
//      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    spark.table("dws09.machine_produce_per_avgtime09")
      .createOrReplaceTempView("data")

    // Rank each machine's producetimes once (the original duplicated this window
    // query in two separate views differing only in the `where` clause).
    // `rk` instead of `row`: `row` is a reserved word in several SQL dialects.
    spark.sql(
      """
        |select
        |produce_machine_id as machine_id,
        |producetime,
        |dense_rank() over(partition by produce_machine_id order by producetime desc) as rk
        |from data
        |""".stripMargin).createOrReplaceTempView("ranked")

    // Self-join rank 1 against rank 2; inner join keeps only machines that have
    // at least two distinct producetimes (matches the original behavior).
    val result = spark.sql(
      """
        |select
        |f.machine_id,
        |f.producetime as first_time,
        |s.producetime as second_time
        |from ranked f
        |join ranked s
        |on f.machine_id = s.machine_id
        |where f.rk = 1 and s.rk = 2
        |""".stripMargin)

    // NOTE(review): credentials are hard-coded; consider moving them to external
    // configuration (args / properties file) outside of a competition setting.
    val connect = new Properties()
    connect.setProperty("user", "root")
    connect.setProperty("password", "123456")
    connect.setProperty("driver", "com.mysql.jdbc.Driver")

    // Overwrite mode drops and recreates the target table on every run.
    result.write.mode("overwrite")
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false", "machine_produce_timetop2_09", connect)

    spark.close()
  }

}
