package org.zhazhahei

import org.apache.spark.api.java.function
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SaveMode, SparkSession, functions}
import org.apache.spark.sql.functions._

/**
 * Batch job: counts US traffic accidents per calendar year.
 *
 * Reads the US_Accidents CSV, groups records by the year of `start_time`,
 * prints the counts, and persists them to a MySQL table and a single CSV file.
 */
object accidentcount_time {
  def main(args: Array[String]): Unit = {

    // Build a single SparkSession. The original code created a separate
    // SparkContext first, which (a) was redundant — the session owns its own
    // context — and (b) caused the session to silently reuse that context's
    // appName ("YourAppName") instead of the one set here. Master is set on
    // the builder so the job still runs locally without spark-submit flags.
    val spark = SparkSession.builder
      .master("local[*]")
      .appName("accidentcount_time")
      .getOrCreate()

    // Load the accident records; the header row supplies column names.
    // NOTE(review): path is hard-coded and Windows-specific — consider taking
    // it from args.
    val accidents = spark.read
      .format("csv")
      .option("header", "true")
      .load("src\\main\\java\\org\\datas\\US_Accidents_March23.csv")

    // Parse start_time into a proper date so year() can be applied below.
    // (A "start_year" substring column was previously also derived here but
    // never used; that dead per-row work has been removed.)
    val accidentsWithDate = accidents
      .withColumn("start_date", to_date(col("start_time")))

    // Count accidents per calendar year.
    val yearCounts = accidentsWithDate
      .groupBy(year(col("start_date")).alias("year"))
      .agg(count("*").alias("accident_count"))

    yearCounts.show()

    // Persist the per-year counts to MySQL, replacing any previous run.
    // SECURITY: credentials are hard-coded — move them to configuration or
    // environment variables before deploying.
    val jdbcURL = "jdbc:mysql://localhost:3306/accident"
    val tableName = "time_table"
    val connectionProperties = new java.util.Properties()
    connectionProperties.setProperty("user", "root")
    connectionProperties.setProperty("password", "011216")

    yearCounts.write
      .mode(SaveMode.Overwrite)
      .jdbc(jdbcURL, tableName, connectionProperties)

    // Also emit a single CSV part-file for downstream consumption
    // (coalesce(1) forces one output file; fine for this small result set).
    val outputPath = "src\\main\\java\\org\\USresult"
    yearCounts.coalesce(1).write.mode(SaveMode.Overwrite).csv(outputPath)

    spark.stop()
  }
}

