package com.chinasoft.scala

import java.util.Properties

import scala.util.control.NonFatal

import org.apache.spark.sql.{DataFrame, SparkSession}

object PollutionAnalysis {

  // Name of the Hive database holding the air_test table.
  private val HIVE_DATABASE = "airtest"
  // Fully-qualified table name, defined once for easy maintenance.
  private val HIVE_TABLE = s"$HIVE_DATABASE.air_test"

  /**
   * Entry point. Runs the full air-pollution analysis pipeline:
   * Hive connectivity check, five analysis stages, then a bulk write
   * of every result DataFrame to MySQL. The SparkSession is always
   * stopped in `finally`, even when a stage fails.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession via the shared configuration helper.
    val spark = SparkConfig.getSparkSession("PollutionAnalysis")

    try {
      // Verify the Hive environment before running any analysis.
      println("=== 测试Hive连接 ===")
      testHiveConnection(spark)

      // 2. Pollutant concentration analysis (national + regional).
      println("=== 开始污染物浓度分析 ===")
      val (nationalAvg, regionalAvg) = analyzePollutantConcentration(spark)

      // 3. Time-trend analysis (hourly + daily).
      println("=== 开始污染物时间趋势分析 ===")
      val (hourlyTrend, dailyTrend) = analyzeTimeTrend(spark)

      // 4. City pollution ranking (overall ranking + key cities).
      println("=== 开始城市污染排名分析 ===")
      val (cityRanking, keyCityAnalysis) = analyzeCityPollution(spark)

      // 5. Pollutant type distribution.
      println("=== 开始污染物类型分析 ===")
      val pollutantDistribution = analyzePollutantDistribution(spark)

      // 6. Hotspot region analysis.
      println("=== 开始热点分析 ===")
      val hotspotAnalysis = analyzeHotspotRegions(spark)

      // 7. Persist all results to MySQL.
      println("=== 开始写入MySQL ===")
      writeToMySQL(
        nationalAvg,
        regionalAvg,
        hourlyTrend,
        dailyTrend,
        cityRanking,
        keyCityAnalysis,
        pollutantDistribution,
        hotspotAnalysis
      )

      println("=== 所有分析任务完成 ===")

    } catch {
      // NonFatal: let OutOfMemoryError / InterruptedException etc. propagate.
      case NonFatal(e) =>
        println(s"分析过程中出现错误: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      spark.stop()
    }
  }

  /**
   * Smoke-tests the Hive connection: lists databases, switches to the
   * target database, describes the table, reads a sample and a row count.
   * Failures are reported but swallowed (best-effort diagnostic) so the
   * pipeline behaves the same as before; the subsequent analyses will
   * surface any real connectivity problem.
   */
  def testHiveConnection(spark: SparkSession): Unit = {
    try {
      // Database listing proves the metastore is reachable.
      println("显示数据库:")
      spark.sql("SHOW DATABASES").show(truncate = false)

      // Switch to the target database.
      spark.sql(s"USE $HIVE_DATABASE")
      println(s"已切换到Hive数据库: $HIVE_DATABASE")

      // Table schema check.
      println("显示表结构:")
      spark.sql(s"DESCRIBE $HIVE_TABLE").show(truncate = false)

      // Sample-read check.
      println("样例数据:")
      val sampleData = spark.sql(s"SELECT * FROM $HIVE_TABLE LIMIT 10")
      sampleData.show()

      // Row-count check.
      println("数据统计:")
      val countData =
        spark.sql(s"SELECT COUNT(*) as total_count FROM $HIVE_TABLE")
      countData.show()

      println("Hive连接测试成功!")
    } catch {
      case NonFatal(e) =>
        println(s"Hive连接测试失败: ${e.getMessage}")
        println(s"请检查Hive服务是否启动，数据库[$HIVE_DATABASE]和表[$HIVE_TABLE]是否存在")
        e.printStackTrace()
    }
  }

  /**
   * Pollutant concentration analysis.
   *
   * @return (nationalAvg, regionalAvg):
   *         nationalAvg — avg/max/min/stddev and sample count per pollutant type;
   *         regionalAvg — average concentration and sample count per (city, type).
   */
  def analyzePollutantConcentration(
      spark: SparkSession
  ): (DataFrame, DataFrame) = {
    println("=== 全国污染物平均浓度分析 ===")

    // Nation-wide statistics per pollutant type.
    val nationalAvg = spark.sql(s"""
      SELECT
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        MAX(concentration_value) as max_concentration,
        MIN(concentration_value) as min_concentration,
        STDDEV(concentration_value) as std_concentration,
        COUNT(*) as sample_count
      FROM $HIVE_TABLE
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
      GROUP BY type
      ORDER BY avg_concentration DESC
    """)

    nationalAvg.show(truncate = false)

    println("=== 区域污染物浓度差异分析 ===")

    // Per-city averages to expose regional differences.
    val regionalAvg = spark.sql(s"""
      SELECT
        city,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        COUNT(*) as sample_count
      FROM $HIVE_TABLE
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND city IS NOT NULL
      GROUP BY city, type
      ORDER BY city, avg_concentration DESC
    """)

    regionalAvg.show(20, truncate = false)

    (nationalAvg, regionalAvg)
  }

  /**
   * Time-trend analysis.
   *
   * @return (hourlyTrend, dailyTrend): average concentration and sample
   *         count per (hour, type) and per (date_value, type) respectively.
   */
  def analyzeTimeTrend(spark: SparkSession): (DataFrame, DataFrame) = {
    println("=== 小时浓度趋势分析 ===")

    // Hour-of-day trend per pollutant type.
    val hourlyTrend = spark.sql(s"""
      SELECT
        hour,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        COUNT(*) as sample_count
      FROM $HIVE_TABLE
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND hour IS NOT NULL
      GROUP BY hour, type
      ORDER BY hour, type
    """)

    hourlyTrend.show(24, truncate = false)

    println("=== 日期浓度趋势分析 ===")

    // Daily trend per pollutant type.
    val dailyTrend = spark.sql(s"""
      SELECT
        date_value,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        COUNT(*) as sample_count
      FROM $HIVE_TABLE
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND date_value IS NOT NULL
      GROUP BY date_value, type
      ORDER BY date_value, type
    """)

    dailyTrend.show(20, truncate = false)

    (hourlyTrend, dailyTrend)
  }

  /**
   * City pollution analysis.
   *
   * @return (cityRanking, keyCityAnalysis):
   *         cityRanking — per-pollutant RANK() of cities by average concentration;
   *         keyCityAnalysis — avg/max/min stats restricted to the configured key cities.
   */
  def analyzeCityPollution(spark: SparkSession): (DataFrame, DataFrame) = {
    println("=== 城市污染排名分析 ===")

    // Rank cities within each pollutant type by average concentration.
    val cityRanking = spark.sql(s"""
      SELECT
        city,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        RANK() OVER(PARTITION BY type ORDER BY AVG(concentration_value) DESC) as pollution_rank,
        COUNT(*) as sample_count
      FROM $HIVE_TABLE
      WHERE concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND city IS NOT NULL
      GROUP BY city, type
      ORDER BY type, pollution_rank
    """)

    cityRanking.show(30, truncate = false)

    println("=== 重点城市污染物分析 ===")

    // Key cities come from configuration; double any embedded single quote
    // so a city name containing ' cannot break the generated IN-list.
    val keyCities = SparkConfig.getKeyCities()
    val keyCitiesStr =
      keyCities.map(c => "'" + c.replace("'", "''") + "'").mkString(",")

    val keyCityAnalysis = spark.sql(s"""
      SELECT
        city,
        type as pollutant_type,
        AVG(concentration_value) as avg_concentration,
        MAX(concentration_value) as max_concentration,
        MIN(concentration_value) as min_concentration,
        COUNT(*) as sample_count
      FROM $HIVE_TABLE
      WHERE city IN ($keyCitiesStr)
        AND concentration_value IS NOT NULL
        AND type IS NOT NULL
        AND city IS NOT NULL
      GROUP BY city, type
      ORDER BY city, type
    """)

    keyCityAnalysis.show(30, truncate = false)

    (cityRanking, keyCityAnalysis)
  }

  /**
   * Pollutant type distribution: record count, percentage of total and
   * average concentration per pollutant type.
   *
   * Bug fix: the percentage denominator now applies the same
   * `type IS NOT NULL` filter as the outer query, so percentages sum to
   * 100 even when the table contains rows with a NULL type.
   */
  def analyzePollutantDistribution(spark: SparkSession): DataFrame = {
    println("=== 污染物类型分布分析 ===")

    val pollutantDistribution = spark.sql(s"""
      SELECT
        type as pollutant_type,
        COUNT(*) as record_count,
        ROUND(COUNT(*) * 100.0 / (SELECT COUNT(*) FROM $HIVE_TABLE WHERE type IS NOT NULL), 2) as percentage,
        AVG(concentration_value) as avg_concentration
      FROM $HIVE_TABLE
      WHERE type IS NOT NULL
      GROUP BY type
      ORDER BY record_count DESC
    """)

    pollutantDistribution.show(truncate = false)

    pollutantDistribution
  }

  /**
   * Hotspot region analysis. Maps each city to a coarse region, aggregates
   * hourly statistics per (region, city, hour, date, pollutant), then scores
   * each row against the mean of its (pollutant_type, hour) window; a score
   * above 1.5 marks a hotspot.
   */
  def analyzeHotspotRegions(spark: SparkSession): DataFrame = {
    import org.apache.spark.sql.expressions.Window
    import org.apache.spark.sql.functions._

    println("=== 热点区域分析 ===")

    // Enrich raw rows with a region label derived from the city name.
    val enrichedData = spark.sql(s"""
      SELECT 
        date_value,
        hour,
        type as pollutant_type,
        city,
        concentration_value,
        CASE 
          WHEN city IN ('哈尔滨', '长春', '沈阳', '大连') THEN '东北'
          WHEN city IN ('北京', '天津', '石家庄', '太原', '呼和浩特') THEN '华北'
          WHEN city IN ('上海', '南京', '杭州', '合肥', '福州', '南昌', '济南', '青岛') THEN '华东'
          WHEN city IN ('广州', '深圳', '南宁', '海口') THEN '华南'
          WHEN city IN ('成都', '重庆', '贵阳', '昆明', '拉萨') THEN '西南'
          WHEN city IN ('西安', '兰州', '西宁', '银川', '乌鲁木齐') THEN '西北'
          WHEN city IN ('武汉', '长沙', '郑州') THEN '华中'
          ELSE '其他'
        END as region
      FROM $HIVE_TABLE
      WHERE concentration_value IS NOT NULL 
        AND type IS NOT NULL 
        AND city IS NOT NULL
        AND date_value IS NOT NULL
        AND hour IS NOT NULL
    """)

    // Hour-level aggregates per region/city/pollutant.
    val hotspotAnalysis = enrichedData
      .groupBy("region", "city", "hour", "date_value", "pollutant_type")
      .agg(
        avg("concentration_value").alias("avg_hourly_conc"),
        max("concentration_value").alias("max_hourly_conc"),
        count("*").alias("record_count")
      )

    // Hotspot score = local average relative to the (type, hour) window mean.
    val windowSpec = Window.partitionBy("pollutant_type", "hour")
    val hotspotWithScore = hotspotAnalysis
      .withColumn(
        "hotspot_score",
        col("avg_hourly_conc") / avg("avg_hourly_conc").over(windowSpec)
      )
      .withColumn(
        "is_hotspot",
        when(col("hotspot_score") > 1.5, 1).otherwise(0)
      )
      .orderBy(desc("hotspot_score"))

    println("热点分析结果预览:")
    hotspotWithScore.show(20, truncate = false)

    hotspotWithScore
  }

  /**
   * Writes every result DataFrame to its MySQL table in overwrite mode
   * (with truncate, so existing table schemas are preserved).
   * Failures are reported but swallowed, matching the pipeline's
   * best-effort error style.
   */
  def writeToMySQL(
      nationalAvg: DataFrame,
      regionalAvg: DataFrame,
      hourlyTrend: DataFrame,
      dailyTrend: DataFrame,
      cityRanking: DataFrame,
      keyCityAnalysis: DataFrame,
      pollutantDistribution: DataFrame,
      hotspotAnalysis: DataFrame
  ): Unit = {

    val prop = SparkConfig.getMySQLProperties()
    val url = SparkConfig.getMySQLUrl()

    // Shared JDBC write options: batch inserts, TRUNCATE instead of DROP.
    val writeOptions = Map(
      "batchsize" -> "10000",
      "truncate" -> "true"
    )

    // Writes one DataFrame to the given MySQL table with the shared options.
    def writeTable(label: String, df: DataFrame, table: String): Unit = {
      println(label)
      df.write
        .mode("overwrite")
        .options(writeOptions)
        .jdbc(url, table, prop)
    }

    try {
      println(s"准备写入MySQL数据库: ${url.split("/").last.split("\\?").head}")

      writeTable("开始写入全国平均浓度数据...", nationalAvg, "national_avg_concentration")
      writeTable("开始写入区域浓度数据...", regionalAvg, "regional_concentration")
      writeTable("开始写入小时趋势数据...", hourlyTrend, "hourly_trend")
      writeTable("开始写入日趋势数据...", dailyTrend, "daily_trend")
      writeTable("开始写入城市排名数据...", cityRanking, "city_pollution_ranking")
      writeTable("开始写入重点城市分析数据...", keyCityAnalysis, "key_city_analysis")
      writeTable("开始写入污染物分布数据...", pollutantDistribution, "pollutant_distribution")
      writeTable("开始写入热点分析数据...", hotspotAnalysis, "hotspot_analysis")

      println("=== 数据已成功写入MySQL ===")

    } catch {
      case NonFatal(e) =>
        println(s"MySQL写入失败: ${e.getMessage}")
        println(s"请检查MySQL服务是否启动，连接地址是否正确: ${url.split("\\?")(0)}")
        e.printStackTrace()
    }
  }
}
