package org.zhazhahei

  import org.apache.spark.{SparkConf, SparkContext}
  import org.apache.spark.sql.{SaveMode, SparkSession}
  import org.apache.spark.sql.functions._

  object accidentcount_totaltime {
    /** Computes the total accident duration (in seconds) per US state from the
      * US_Accidents CSV, then writes the aggregate to a MySQL table and to a
      * local CSV file.
      */
    def main(args: Array[String]): Unit = {

      // One SparkSession is the single entry point. The original code also built
      // a standalone SparkContext with a placeholder appName ("YourAppName");
      // getOrCreate() reused that context, so the intended appName was ignored
      // and the extra context was never stopped. Consolidating fixes both.
      val spark = SparkSession.builder
        .master("local[*]")
        .appName("accidentcount_totaltime")
        .getOrCreate()

      // Forward slashes are accepted by Hadoop/Spark paths on all platforms;
      // backslash-only paths break on Linux/macOS.
      val inputPath = "src/main/java/org/datas/US_Accidents_March23.csv"
      val df = spark.read
        .option("header", "true")
        .csv(inputPath)

      // Convert the "start_time" / "end_time" string columns to timestamps.
      // Spark resolves column names case-insensitively by default, so this also
      // matches "Start_Time" / "End_Time" headers.
      val dfWithTimestamp = df
        .withColumn("start_time", to_timestamp(col("start_time")))
        .withColumn("end_time", to_timestamp(col("end_time")))

      // Casting a timestamp to long yields epoch seconds, so the difference is
      // the accident duration in seconds; sum it per state.
      val result = dfWithTimestamp
        .withColumn("time_difference", col("end_time").cast("long") - col("start_time").cast("long"))
        .groupBy("state")
        .agg(sum("time_difference").alias("total_time"))

      result.show()

      // NOTE(review): credentials are hard-coded — move them to configuration
      // or environment variables before sharing/deploying this code.
      val jdbcURL = "jdbc:mysql://localhost:3306/accident"
      val tableName = "totaltime_table"
      val connectionProperties = new java.util.Properties()
      connectionProperties.setProperty("user", "root")
      connectionProperties.setProperty("password", "011216")

      result.write
        .mode(SaveMode.Overwrite)
        .jdbc(jdbcURL, tableName, connectionProperties)

      // coalesce(1) forces a single output file; acceptable here because the
      // per-state aggregate is tiny.
      val outputPath = "src/main/java/org/USresult/totaltime"
      result.coalesce(1).write.mode(SaveMode.Overwrite).csv(outputPath)

      // Stops the session and its underlying SparkContext.
      spark.stop()
    }
  }
// Uses withColumn with to_timestamp to convert the "start_time" and "end_time" columns to timestamp type.
// Uses withColumn to compute the time difference, then groupBy and agg to total the durations per state.

