package org.fickler

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.functions._

object UkCasualtyType {

  /**
   * Joins the UK road-casualty and accident datasets on `accident_index`,
   * aggregates total casualties per `casualty_type`, and writes the result
   * both to a single CSV file and to a MySQL table.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {

    // One SparkSession configured directly is sufficient. The original code
    // created a standalone SparkContext (appName "UK") first, which
    // SparkSession.builder.getOrCreate() silently reused — making the
    // builder's own appName a no-op and leaving a redundant context around.
    val spark = SparkSession.builder
      .master("local[*]")
      .appName("UkCasualtyType")
      .getOrCreate()

    // Casualty-level records (one row per casualty; contains casualty_type).
    val file1Path = "src/main/java/org/datas/dft-road-casualty-statistics-casualty-1979-2020.csv"
    val file1 = spark.read.option("header", "true").csv(file1Path)

    // Accident-level records (one row per accident; contains number_of_casualties).
    val file2Path = "src/main/java/org/datas/dft-road-casualty-statistics-accident-1979-2020.csv"
    val file2 = spark.read.option("header", "true").csv(file2Path)

    // Inner join on the shared accident identifier.
    // NOTE(review): the casualty file has multiple rows per accident, so after
    // this join each accident's number_of_casualties is summed once per
    // casualty row — confirm this fan-out is the intended metric.
    val joinedDF = file1.join(file2, "accident_index")

    // CSV columns are read as strings; cast explicitly to long before summing
    // so totals are exact integers instead of relying on Spark's implicit
    // string-to-double coercion inside sum().
    val casualtiesByType = joinedDF.groupBy("casualty_type")
      .agg(sum(col("number_of_casualties").cast("long")).alias("total_casualties"))
      .orderBy(desc("total_casualties"))

    // Preview the aggregated result on the console.
    casualtiesByType.show()

    // Coalesce to one partition so the output is a single CSV part file.
    val outputPath = "src/main/java/org/UkResult/UkCasualtyType"
    casualtiesByType
      .coalesce(1)
      .write
      .mode("overwrite")
      .option("header", "true")
      .csv(outputPath)

    // NOTE(review): credentials are hard-coded — move to external config.
    // "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x class name; use
    // "com.mysql.cj.jdbc.Driver" if the project depends on Connector/J 8+.
    val mysqlUrl = "jdbc:mysql://localhost:3306/ukaccident"
    val mysqlProperties = new java.util.Properties()
    mysqlProperties.setProperty("user", "root")
    mysqlProperties.setProperty("password", "011216")
    mysqlProperties.setProperty("driver", "com.mysql.jdbc.Driver")

    // Mirror the aggregate into MySQL, replacing any previous table contents.
    casualtiesByType.write
      .mode(SaveMode.Overwrite)
      .jdbc(mysqlUrl, "UkCasualtyType", mysqlProperties)

    spark.stop()
  }
}

/**
bin/spark-submit \
--class org.fickler.UkCasualtyType \
--master local[2] \
./examples/jars/accidentgroup3-1.0-SNAPSHOT.jar \
10
 */