package org.fickler

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.functions._


/**
 * Spark batch job: reads the UK road-casualty vehicle statistics CSV
 * (1979-2020), counts accidents per driver age band
 * (`age_band_of_driver`), prints the distribution, and writes it out as
 * a single headered CSV file.
 */
object UkAge {

  def main(args: Array[String]): Unit = {

    // Build one SparkSession with the master set on the builder.
    // (Previously a SparkConf/SparkContext was created manually and a
    // separate SparkSession built without a master — redundant, and the
    // session only worked because the pre-created context happened to
    // exist. The session manages its own context.)
    val spark = SparkSession.builder
      .master("local[*]")
      .appName("UkAge")
      .getOrCreate()

    // Input: one row per vehicle involved in a recorded casualty.
    //    val inputPath = "src/main/java/org/datas/dft-road-casualty-statistics-vehicle-1979-2020.csv"
    val inputPath = "/opt/module/spark-standalone/data/uk/dft-road-casualty-statistics-vehicle-1979-2020.csv"
    val df = spark.read.option("header", "true").csv(inputPath)

    // Group by driver age band and count rows (accidents) per band,
    // sorted by band for stable, readable output.
    val ageBandDistribution = df.groupBy("age_band_of_driver")
      .agg(count("*").alias("accident_count"))
      .orderBy("age_band_of_driver")

    // Show the distribution on the console.
    ageBandDistribution.show()

    // Persist the result as a single CSV file.
    val outputPath = "/opt/module/spark-standalone/data/UkResult/UkAge"
    //    val outputPath = "src/main/java/org/UkResult/UkCasualtyType"
    ageBandDistribution
      .coalesce(1) // merge into one partition so exactly one part-file is written
      .write
      .mode("overwrite") // overwrite any existing output directory
      .option("header", "true")
      .csv(outputPath)

    //    // Optionally export the result to a MySQL database
    //    val mysqlUrl = "jdbc:mysql://localhost:3306/ukaccident"
    //    val mysqlProperties = new java.util.Properties()
    //    mysqlProperties.setProperty("user", "root")
    //    mysqlProperties.setProperty("password", "011216")
    //    mysqlProperties.setProperty("driver", "com.mysql.jdbc.Driver")

    //    ageBandDistribution.write
    //      .mode(SaveMode.Overwrite) // overwrite the table if it already exists
    //      .jdbc(mysqlUrl, "UkAge", mysqlProperties)

    // Release all Spark resources.
    spark.stop()

  }
}

/**
Example spark-submit invocation. Note: the trailing `10` is passed as
args(0) but is not read by this job.

bin/spark-submit \
--class org.fickler.UkAge \
--master local[2] \
./examples/jars/accidentgroup3-1.0-SNAPSHOT.jar \
10
 */