package industry_2024.industry_05.indicator

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

import java.util.Properties

object indicator02 {
  /**
   * Computes, for every machine, the exact median running duration in seconds
   * (state = "运行", finished records only) from dwd05.fact_change_record joined
   * to dwd05.dim_machine, then overwrites the MySQL table
   * shtd_industry.machine_running_median05 with one row per
   * (machine_id, machine_factory).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .enableHiveSupport()
      .getOrCreate()

    // Keep only records that are in the running state AND have an end time,
    // so (changeendtime - changestarttime) is well-defined for every row.
    spark.table("dwd05.fact_change_record")
      .where(col("changerecordstate") === "运行")
      .filter(col("changeendtime").isNotNull)
      .createOrReplaceTempView("fact_change_record")

    spark.table("dwd05.dim_machine")
      .createOrReplaceTempView("dim_machine")

    // FIX: the original "method 2" used
    //   percentile(...) OVER (PARTITION BY ... ORDER BY changestarttime)
    // which is a *running* median evaluated on every source row — the result
    // had one row per change record (many duplicates per machine) instead of
    // one median per group, which is why the original answer was wrong.
    // GROUP BY + percentile() yields exactly one exact (not approximate)
    // median row per (machine, factory) pair.
    // Also fixed the output alias typo "machine_factroy" -> "machine_factory".
    val result = spark.sql(
      """
        |select
        |  f.changemachineid as machine_id,
        |  dm.machinefactory as machine_factory,
        |  percentile(
        |    unix_timestamp(f.changeendtime) - unix_timestamp(f.changestarttime),
        |    0.5
        |  ) as total_running_time
        |from fact_change_record f
        |join dim_machine dm
        |  on dm.basemachineid = f.changemachineid
        |group by f.changemachineid, dm.machinefactory
        |""".stripMargin)

    // Console preview for manual verification of the computed medians.
    result.show()

    val mysqlConnect = new Properties()
    mysqlConnect.setProperty("user", "root")
    mysqlConnect.setProperty("password", "123456")
    // NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5.x class;
    // Connector/J 8.x uses com.mysql.cj.jdbc.Driver — keep this in sync with
    // the driver jar actually on the classpath.
    mysqlConnect.setProperty("driver", "com.mysql.jdbc.Driver")

    // Overwrite the target table with the freshly computed medians.
    result.write.mode("overwrite")
      .jdbc(
        "jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false",
        "machine_running_median05",
        mysqlConnect)

    spark.close()
  }

}
