package com.chinasoft.scala

import java.util.Properties

import scala.util.control.NonFatal

import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, SparkSession}

object TestPollutionAnalysis {

  /**
   * Entry point: builds an in-memory test DataFrame, runs each analysis
   * stage in sequence, and persists all results to MySQL.
   *
   * Always stops the SparkSession in `finally`, even when a stage fails.
   */
  def main(args: Array[String]): Unit = {
    // Local SparkSession without Hive support; adaptive query execution
    // lets Spark tune shuffle partitions at runtime.
    val spark = SparkSession
      .builder()
      .appName("TestPollutionAnalysis")
      .master("local[*]")
      .config("spark.sql.adaptive.enabled", "true")
      .getOrCreate()

    try {
      println("=== 创建测试数据 ===")
      val testData = createTestData(spark)
      testData.show(20)
      println(s"测试数据总数: ${testData.count()}")

      println("\n=== 开始污染物浓度分析 ===")
      val (nationalAvg, regionalAvg) =
        analyzePollutantConcentration(spark, testData)

      println("\n=== 开始时间趋势分析 ===")
      val (hourlyTrend, dailyTrend) = analyzeTimeTrend(spark, testData)

      println("\n=== 开始城市排名分析 ===")
      val (cityRanking, keyCityAnalysis) = analyzeCityPollution(spark, testData)

      println("\n=== 开始污染物分布分析 ===")
      val pollutantDistribution = analyzePollutantDistribution(spark, testData)

      println("\n=== 开始热点分析 ===")
      val hotspotAnalysis = analyzeHotspotRegions(spark, testData)

      println("\n=== 开始写入MySQL ===")
      writeToMySQL(
        nationalAvg,
        regionalAvg,
        hourlyTrend,
        dailyTrend,
        cityRanking,
        keyCityAnalysis,
        pollutantDistribution,
        hotspotAnalysis
      )

      println("=== 所有分析任务完成 ===")

    } catch {
      // NonFatal keeps truly fatal errors (OutOfMemoryError, etc.) propagating
      // instead of being swallowed here.
      case NonFatal(e) =>
        println(s"分析过程中出现错误: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      spark.stop()
    }
  }

  /**
   * Builds a small fixed DataFrame of air-quality readings for six cities.
   *
   * Schema: (city, type, concentration_value, date_value, hour) — one row per
   * (city, pollutant, hour) sample, all dated 2023-01-01.
   *
   * @param spark active session used to parallelize the rows
   * @return DataFrame with the schema above
   */
  def createTestData(spark: SparkSession): DataFrame = {
    // Note: `import spark.implicits._` was removed — createDataFrame(rdd, schema)
    // needs no encoders, so the import was unused.
    val schema = StructType(
      Array(
        StructField("city", StringType, true),
        StructField("type", StringType, true),
        StructField("concentration_value", DoubleType, true),
        StructField("date_value", StringType, true),
        StructField("hour", IntegerType, true)
      )
    )

    val testRows = Seq(
      // Beijing
      Row("北京", "PM2.5", 35.5, "2023-01-01", 0),
      Row("北京", "PM2.5", 38.2, "2023-01-01", 1),
      Row("北京", "PM2.5", 42.1, "2023-01-01", 2),
      Row("北京", "PM10", 65.5, "2023-01-01", 0),
      Row("北京", "PM10", 68.2, "2023-01-01", 1),
      Row("北京", "SO2", 12.5, "2023-01-01", 0),
      Row("北京", "NO2", 45.3, "2023-01-01", 0),

      // Shanghai
      Row("上海", "PM2.5", 28.5, "2023-01-01", 0),
      Row("上海", "PM2.5", 30.1, "2023-01-01", 1),
      Row("上海", "PM2.5", 32.3, "2023-01-01", 2),
      Row("上海", "PM10", 48.5, "2023-01-01", 0),
      Row("上海", "PM10", 51.1, "2023-01-01", 1),
      Row("上海", "SO2", 8.5, "2023-01-01", 0),
      Row("上海", "NO2", 38.3, "2023-01-01", 0),

      // Guangzhou
      Row("广州", "PM2.5", 25.5, "2023-01-01", 0),
      Row("广州", "PM2.5", 27.2, "2023-01-01", 1),
      Row("广州", "PM2.5", 29.1, "2023-01-01", 2),
      Row("广州", "PM10", 42.5, "2023-01-01", 0),
      Row("广州", "PM10", 45.2, "2023-01-01", 1),
      Row("广州", "SO2", 6.5, "2023-01-01", 0),
      Row("广州", "NO2", 32.3, "2023-01-01", 0),

      // Shenzhen
      Row("深圳", "PM2.5", 22.5, "2023-01-01", 0),
      Row("深圳", "PM2.5", 24.2, "2023-01-01", 1),
      Row("深圳", "PM10", 38.5, "2023-01-01", 0),
      Row("深圳", "SO2", 5.5, "2023-01-01", 0),
      Row("深圳", "NO2", 28.3, "2023-01-01", 0),

      // Chengdu
      Row("成都", "PM2.5", 45.5, "2023-01-01", 0),
      Row("成都", "PM2.5", 48.2, "2023-01-01", 1),
      Row("成都", "PM10", 75.5, "2023-01-01", 0),
      Row("成都", "SO2", 15.5, "2023-01-01", 0),
      Row("成都", "NO2", 52.3, "2023-01-01", 0),

      // Wuhan
      Row("武汉", "PM2.5", 38.5, "2023-01-01", 0),
      Row("武汉", "PM2.5", 41.2, "2023-01-01", 1),
      Row("武汉", "PM10", 62.5, "2023-01-01", 0),
      Row("武汉", "SO2", 11.5, "2023-01-01", 0),
      Row("武汉", "NO2", 43.3, "2023-01-01", 0)
    )

    val rdd = spark.sparkContext.parallelize(testRows)
    spark.createDataFrame(rdd, schema)
  }

  /**
   * National and per-city average-concentration analysis.
   *
   * Registers `data` as temp view `air_test`, then aggregates avg/max/min/
   * stddev per pollutant nationally and avg per (city, pollutant) regionally.
   * Both results are shown (side effect) and returned.
   *
   * @return (nationalAvg, regionalAvg) DataFrames
   */
  def analyzePollutantConcentration(
      spark: SparkSession,
      data: DataFrame
  ): (DataFrame, DataFrame) = {
    data.createOrReplaceTempView("air_test")

    println("=== 全国污染物平均浓度分析 ===")
    // Plain triple-quoted string: no interpolation needed, and dropping the
    // `s` prefix means a literal `$` in SQL can never be misinterpreted.
    val nationalAvg = spark.sql("""
      SELECT
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        MAX(concentration_value) as max_concentration,
        MIN(concentration_value) as min_concentration,
        STDDEV(concentration_value) as std_concentration,
        COUNT(*) as sample_count
      FROM air_test
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
      GROUP BY type
      ORDER BY avg_concentration DESC
    """)

    nationalAvg.show(truncate = false)

    println("=== 区域污染物平均浓度分析 ===")
    val regionalAvg = spark.sql("""
      SELECT
        city,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        COUNT(*) as sample_count
      FROM air_test
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND city IS NOT NULL
      GROUP BY city, type
      ORDER BY city, avg_concentration DESC
    """)

    regionalAvg.show(30, truncate = false)

    (nationalAvg, regionalAvg)
  }

  /**
   * Time-trend analysis: average concentration per (hour, pollutant) and
   * per (date, pollutant). Shows both results and returns them.
   *
   * @return (hourlyTrend, dailyTrend) DataFrames
   */
  def analyzeTimeTrend(
      spark: SparkSession,
      data: DataFrame
  ): (DataFrame, DataFrame) = {
    data.createOrReplaceTempView("air_test")

    println("=== 小时趋势分析 ===")
    // No interpolation in these queries, so the `s` prefix is dropped.
    val hourlyTrend = spark.sql("""
      SELECT
        hour,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        COUNT(*) as sample_count
      FROM air_test
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND hour IS NOT NULL
      GROUP BY hour, type
      ORDER BY hour, type
    """)

    hourlyTrend.show(24, truncate = false)

    println("=== 日期趋势分析 ===")
    val dailyTrend = spark.sql("""
      SELECT
        date_value,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        COUNT(*) as sample_count
      FROM air_test
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND date_value IS NOT NULL
      GROUP BY date_value, type
      ORDER BY date_value, type
    """)

    dailyTrend.show(truncate = false)

    (hourlyTrend, dailyTrend)
  }

  /**
   * City-level pollution analysis: RANK of cities per pollutant by average
   * concentration, plus detailed stats restricted to a fixed list of key
   * cities. Shows both results and returns them.
   *
   * @return (cityRanking, keyCityAnalysis) DataFrames
   */
  def analyzeCityPollution(
      spark: SparkSession,
      data: DataFrame
  ): (DataFrame, DataFrame) = {
    data.createOrReplaceTempView("air_test")

    println("=== 城市污染排名分析 ===")
    // No interpolation here — plain string literal (no `s` prefix).
    val cityRanking = spark.sql("""
      SELECT
        city,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        RANK() OVER(PARTITION BY type ORDER BY AVG(concentration_value) DESC) as pollution_rank,
        COUNT(*) as sample_count
      FROM air_test
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND city IS NOT NULL
      GROUP BY city, type
      ORDER BY type, pollution_rank
    """)

    cityRanking.show(30, truncate = false)

    println("=== 重点城市分析 ===")
    // Quoted, comma-joined IN-list; values are a fixed internal list, not
    // user input, so string building is safe here.
    val keyCities = List("北京", "上海", "广州", "深圳", "成都", "武汉")
    val keyCitiesStr = keyCities.map(city => s"'$city'").mkString(",")

    val keyCityAnalysis = spark.sql(s"""
      SELECT
        city,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        MAX(concentration_value) as max_concentration,
        MIN(concentration_value) as min_concentration,
        COUNT(*) as sample_count
      FROM air_test
      WHERE city IN ($keyCitiesStr)
        AND concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND city IS NOT NULL
      GROUP BY city, type
      ORDER BY city, type
    """)

    keyCityAnalysis.show(truncate = false)

    (cityRanking, keyCityAnalysis)
  }

  /**
   * Pollutant-type distribution: record count, percentage of all records
   * (via a scalar subquery over the whole view), and average concentration
   * per pollutant. Shows and returns the result.
   */
  def analyzePollutantDistribution(
      spark: SparkSession,
      data: DataFrame
  ): DataFrame = {
    data.createOrReplaceTempView("air_test")

    println("=== 污染物类型分布分析 ===")
    // No interpolation needed — plain string literal (no `s` prefix).
    val pollutantDistribution = spark.sql("""
      SELECT
        type as pollutant_type,
        COUNT(*) as record_count,
        ROUND(COUNT(*) * 100.0 / (SELECT COUNT(*) FROM air_test), 2) as percentage,
        AVG(concentration_value) as avg_concentration
      FROM air_test
      WHERE type IS NOT NULL
      GROUP BY type
      ORDER BY record_count DESC
    """)

    pollutantDistribution.show(truncate = false)
    pollutantDistribution
  }

  /**
   * Hotspot analysis: maps cities to regions, aggregates hourly stats per
   * (region, city, hour, date, pollutant), then scores each row against the
   * window average for its (pollutant, hour). Rows scoring > 1.5x the window
   * average are flagged as hotspots (is_hotspot = 1).
   *
   * @return DataFrame with avg/max hourly concentration, hotspot_score and
   *         is_hotspot columns
   */
  def analyzeHotspotRegions(spark: SparkSession, data: DataFrame): DataFrame = {
    import org.apache.spark.sql.functions._
    import org.apache.spark.sql.expressions.Window

    data.createOrReplaceTempView("air_test")

    println("=== 热点区域分析 ===")
    // No interpolation needed — plain string literal (no `s` prefix).
    val enrichedData = spark.sql("""
      SELECT 
        date_value,
        hour,
        type as pollutant_type,
        city,
        concentration_value,
        CASE 
          WHEN city IN ('北京', '天津') THEN '华北'
          WHEN city IN ('上海', '南京', '杭州') THEN '华东'
          WHEN city IN ('广州', '深圳') THEN '华南'
          WHEN city IN ('成都', '重庆') THEN '西南'
          WHEN city IN ('武汉', '长沙') THEN '华中'
          ELSE '其他'
        END as region
      FROM air_test
      WHERE concentration_value IS NOT NULL 
        AND type IS NOT NULL 
        AND city IS NOT NULL
        AND date_value IS NOT NULL
        AND hour IS NOT NULL
    """)

    val hotspotAnalysis = enrichedData
      .groupBy("region", "city", "hour", "date_value", "pollutant_type")
      .agg(
        avg("concentration_value").alias("avg_hourly_conc"),
        max("concentration_value").alias("max_hourly_conc"),
        count("*").alias("record_count")
      )

    // Normalize each row by the mean of its (pollutant, hour) peer group.
    val windowSpec = Window.partitionBy("pollutant_type", "hour")
    val hotspotWithScore = hotspotAnalysis
      .withColumn(
        "hotspot_score",
        col("avg_hourly_conc") / avg("avg_hourly_conc").over(windowSpec)
      )
      .withColumn(
        "is_hotspot",
        when(col("hotspot_score") > 1.5, 1).otherwise(0)
      )

    hotspotWithScore.show(50, truncate = false)
    hotspotWithScore
  }

  /**
   * Persists each analysis result to its own MySQL table with overwrite
   * semantics. All eight writes share the same connection properties and
   * batching options; progress is logged before each write.
   *
   * Failures are logged and swallowed so the driver still shuts down cleanly.
   */
  def writeToMySQL(
      nationalAvg: DataFrame,
      regionalAvg: DataFrame,
      hourlyTrend: DataFrame,
      dailyTrend: DataFrame,
      cityRanking: DataFrame,
      keyCityAnalysis: DataFrame,
      pollutantDistribution: DataFrame,
      hotspotAnalysis: DataFrame
  ): Unit = {
    val prop = new Properties()
    // NOTE(review): hard-coded credentials — move to config/env before
    // running outside a local test environment.
    prop.put("user", "root")
    prop.put("password", "password")
    // Connector/J 8.x driver class. The URL's serverTimezone parameter
    // implies Connector/J 8.x, where com.mysql.jdbc.Driver is deprecated.
    prop.put("driver", "com.mysql.cj.jdbc.Driver")
    prop.put("batchsize", "10000")
    prop.put("rewriteBatchedStatements", "true")

    val url =
      "jdbc:mysql://192.168.16.100:3306/airTest?useSSL=false&serverTimezone=UTC"

    val writeOptions = Map(
      "batchsize" -> "10000",
      "rewriteBatchedStatements" -> "true",
      "useServerPrepStmts" -> "false"
    )

    // (progress message, DataFrame, target table) — written in this order.
    val targets = Seq(
      ("开始写入全国平均浓度数据...", nationalAvg, "national_avg_concentration"),
      ("开始写入区域浓度数据...", regionalAvg, "regional_concentration"),
      ("开始写入小时趋势数据...", hourlyTrend, "hourly_trend"),
      ("开始写入日期趋势数据...", dailyTrend, "daily_trend"),
      ("开始写入城市排名数据...", cityRanking, "city_pollution_ranking"),
      ("开始写入重点城市分析数据...", keyCityAnalysis, "key_city_analysis"),
      ("开始写入污染物分布数据...", pollutantDistribution, "pollutant_distribution"),
      ("开始写入热点分析数据...", hotspotAnalysis, "hotspot_analysis")
    )

    try {
      targets.foreach { case (message, df, table) =>
        println(message)
        df.write
          .mode("overwrite")
          .options(writeOptions)
          .jdbc(url, table, prop)
      }

      println("=== 所有数据写入MySQL完成 ===")

    } catch {
      // NonFatal so OOM and other fatal errors still propagate.
      case NonFatal(e) =>
        println(s"写入MySQL时出现错误: ${e.getMessage}")
        e.printStackTrace()
    }
  }
}
