package com.sdg.bigdata

import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
  * Spark SQL analysis of nationwide taxi ride data. Produces five result sets,
  * each printed to stdout and persisted to MySQL for downstream visualization:
  *  1) Ride counts per year               => pie chart
  *  2) Fare distribution histogram        => histogram
  *  3) Ride counts per province           => China map
  *  4) Ride distance distribution         => smoothed curve chart
  *  5) Yearly income figures (2016-2024)  => pie chart
  */
object AnalysisTaxi {

  // JDBC connection properties shared by every result-table write.
  // NOTE(review): credentials are hard-coded; move them to external
  // configuration (spark-submit --conf / properties file) before production use.
  val prop = new Properties()
  prop.put("user", "root")
  prop.put("password", "123456")

  /** Target MySQL URL, shared by all five output tables. */
  private val JdbcUrl =
    "jdbc:mysql://localhost:3306/174_taix?useUnicode=true&characterEncoding=utf8"

  /** Overwrites `table` in MySQL with the contents of `df`. */
  private def writeToMysql(df: org.apache.spark.sql.DataFrame, table: String): Unit =
    df.write.mode("overwrite").jdbc(JdbcUrl, table, prop)

  def main(args: Array[String]): Unit = {
    // Build the Spark execution environment (local mode, 2 worker threads).
    val conf = new SparkConf().setMaster("local[2]").setAppName("AnalysisTaxi")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    // Load the raw CSV; inferSchema lets Spark derive column types automatically.
    val taxiDF = spark
      .read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
     // .load("hdfs://ip:8020/user/hdfs_path/taxi_data.csv")
      .load(".\\bigdataAnalysis\\src\\main\\resources\\taxi_data.csv")

    taxiDF.show()

    // createOrReplaceTempView avoids an AnalysisException if the view already
    // exists in this session (e.g. when main is re-run from the same shell).
    taxiDF.createOrReplaceTempView("t_taxi")

    // 1) Ride counts per year (assumes rec_date is parseable as yyyy-MM-dd).
    println("1.不同年份乘车数量比例")
    val rideCountsPerYear = spark.sql(
      """
      SELECT
        YEAR(rec_date) AS ride_year,
        COUNT(*) AS ride_count
      FROM
        t_taxi
      GROUP BY
        YEAR(rec_date)
      ORDER BY
        ride_year
    """)

    rideCountsPerYear.show()
    writeToMysql(rideCountsPerYear, "174_taix.left1_year_count")

    // 2) Fare histogram: buckets of width 50 (<=50, <=100, ..., <=400, over 400).
    // The rendered histogram itself is produced by an external tool.
    //
    // Fix vs. the original query: the CASE mixed INT branch values with the
    // STRING literal 'Over 400', which Spark coerces to a STRING column and
    // then sorts lexicographically ('100' < '400' < '50' < 'Over 400').
    // We now group and sort on a numeric sentinel key (450 = "over 400") and
    // build the human-readable label in the outer SELECT, so the bucket order
    // is numeric as intended.
    println("2.乘车费用分析")
    val fareHistogramQuery =
      """
      SELECT
        CASE WHEN fare_key <= 400 THEN CAST(fare_key AS STRING) ELSE 'Over 400' END AS fare_bucket,
        COUNT(*) AS count
      FROM (
        SELECT
          CASE
            WHEN take_memony <= 50  THEN 50
            WHEN take_memony <= 100 THEN 100
            WHEN take_memony <= 150 THEN 150
            WHEN take_memony <= 200 THEN 200
            WHEN take_memony <= 250 THEN 250
            WHEN take_memony <= 300 THEN 300
            WHEN take_memony <= 350 THEN 350
            WHEN take_memony <= 400 THEN 400
            ELSE 450
          END AS fare_key
        FROM t_taxi
      ) bucketed
      GROUP BY fare_key
      ORDER BY fare_key
      """

    val fareHistogram = spark.sql(fareHistogramQuery)
    fareHistogram.show()
    writeToMysql(fareHistogram, "174_taix.left2_take_count")

    // 3) Ride counts per province (map visualization done in an external tool).
    println("3.各个地区乘车数量")
    val rideCountsPerProvince = spark.sql(
      """
      SELECT
        provinces,
        COUNT(*) AS ride_count
      FROM
        t_taxi
      GROUP BY
        provinces
      ORDER BY
        ride_count DESC
    """)

    rideCountsPerProvince.show()
    writeToMysql(rideCountsPerProvince, "174_taix.middle1_china_map")

    // 4) Ride distance distribution: bucket distance into 50-unit bins.
    // The smoothed curve itself is rendered by an external tool.
    println("4.乘车距离分布")
    val distanceDF = spark.sql(
      """
      SELECT
        FLOOR(distance / 50) * 50 AS distance_bucket,
        COUNT(*) AS count
      FROM
        t_taxi
      GROUP BY
        FLOOR(distance / 50) * 50
      ORDER BY
        distance_bucket
    """)
    distanceDF.show()
    writeToMysql(distanceDF, "174_taix.right1_distance")

    // 5) Yearly income, using ride fare (take_memony) as the income proxy.
    // NOTE(review): the heading says income *share* (占比) but this computes the
    // AVERAGE fare per year; SUM(take_memony) may be what a share chart needs.
    // Kept as AVG to preserve the downstream `average_income` column — confirm
    // the intended metric with the dashboard owner.
    println("5.不同年份收入占比")
    val averageIncomePerYear = spark.sql(
      """
      SELECT
        YEAR(rec_date) AS ride_year,
        AVG(take_memony) AS average_income
      FROM
        t_taxi
      GROUP BY
        YEAR(rec_date)
      ORDER BY
        ride_year
    """)
    averageIncomePerYear.show()
    writeToMysql(averageIncomePerYear, "174_taix.right2_year_amount")

    spark.stop()
  }

}
