package com.chinasoft.scala

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, IntegerType}
import org.apache.spark.sql.{DataFrame, SparkSession}
import java.util.Properties

object SpatialTemporalAnalysis {
  // 数据库常量配置
  private val HIVE_DATABASE = "airtest"
  private val HIVE_TABLE = s"$HIVE_DATABASE.air_test"
  private val MYSQL_HOURLY_TABLE = "hourly_spatial_temporal"
  private val MYSQL_TREND_TABLE = "spatio_temporal_trend"

  /**
   * Entry point: builds a SparkSession, verifies Hive/MySQL connectivity,
   * runs both spatio-temporal analyses, and persists the results to MySQL.
   * The SparkSession is always stopped, even on failure.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkConfig.getSparkSession("SimplifiedSpatialTemporalAnalysis")

    // Local helper: prints a section banner before each pipeline stage.
    def stage(title: String): Unit = println(s"=== $title ===")

    try {
      stage("测试Hive和MySQL连接")
      testConnections(spark)

      stage("读取并预处理数据")
      val analysisDF = readAndPreprocessData(spark)

      stage("开始小时级时空热点分析")
      val hourlyHotspot = analyzeHourlyHotspot(analysisDF)
      hourlyHotspot.show(10, truncate = false)

      stage("开始时空趋势联合分析")
      val spatioTemporalTrend = analyzeSpatioTemporalTrend(analysisDF)
      spatioTemporalTrend.show(10, truncate = false)

      stage("开始写入数据库")
      writeToMySQL(hourlyHotspot, spatioTemporalTrend)

      stage("空间时间分析完成")
    } catch {
      case e: Exception =>
        println(s"分析过程中出现错误: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      spark.stop()
    }
  }

  // 读取并预处理数据
  /**
   * Reads the Hive source table and derives the analysis columns:
   * year/month/day/hour (from the yyyyMMdd `date_value` string), an event
   * timestamp, and the region/season labels via the UDFs below.
   *
   * Falls back to a small in-memory test dataset when the Hive table cannot
   * be read (e.g. table or database does not exist).
   */
  def readAndPreprocessData(spark: SparkSession): DataFrame = {
    import scala.util.control.NonFatal

    try {
      val df = spark.table(HIVE_TABLE)
        .filter(col("concentration_value").isNotNull && col("type").isNotNull)
        // date_value is a yyyyMMdd string; slice it into numeric parts
        .withColumn("year", substring(col("date_value"), 1, 4).cast(IntegerType))
        .withColumn("month", substring(col("date_value"), 5, 2).cast(IntegerType))
        .withColumn("day", substring(col("date_value"), 7, 2).cast(IntegerType))
        .withColumn("hour", col("hour").cast(IntegerType))
        // combine date + zero-padded hour into a proper timestamp; null when either part is missing
        .withColumn("datetime",
          when(col("date_value").isNotNull && col("hour").isNotNull,
            to_timestamp(concat(col("date_value"), lit(" "),
              format_string("%02d", col("hour")), lit(":00:00")), "yyyyMMdd HH:mm:ss"))
            .otherwise(lit(null)))
        .withColumn("region", getRegionUDF(col("city")))
        .withColumn("season", getSeasonUDF(col("month")))

      // NOTE: count() triggers a full scan; acceptable for a one-shot batch job
      println(s"数据预处理完成，共 ${df.count()} 条记录")
      df
    } catch {
      // NonFatal: let OOM / interrupts propagate instead of masking them as "read failed"
      case NonFatal(e) =>
        println(s"数据读取失败: ${e.getMessage}")
        // 创建测试数据
        createTestData(spark)
    }
  }

  // 创建测试数据（当Hive表不存在时）
  /**
   * Builds a small in-memory fallback dataset with the same schema the
   * pipeline expects, used when the Hive table is unavailable.
   */
  def createTestData(spark: SparkSession): DataFrame = {
    import spark.implicits._

    println("创建测试数据...")

    val columns = Seq("date_value", "city", "hour", "type", "concentration_value", "region", "season")
    val rows = Seq(
      ("20230115", "北京", 12, "PM2.5", 85.0, "华北", "冬季"),
      ("20230115", "上海", 12, "PM2.5", 65.0, "华东", "冬季"),
      ("20230115", "广州", 12, "PM2.5", 92.0, "华南", "冬季"),
      ("20230116", "北京", 14, "PM2.5", 78.0, "华北", "冬季"),
      ("20230116", "上海", 14, "PM2.5", 72.0, "华东", "冬季"),
      ("20230116", "广州", 14, "PM2.5", 88.0, "华南", "冬季"),
      ("20230715", "北京", 12, "PM2.5", 45.0, "华北", "夏季"),
      ("20230715", "上海", 12, "PM2.5", 38.0, "华东", "夏季"),
      ("20230715", "广州", 12, "PM2.5", 52.0, "华南", "夏季")
    )

    rows.toDF(columns: _*)
  }

  // City -> region lookup table, inverted from the region -> cities definition
  // and built ONCE at object initialization, instead of rebuilding the Map and
  // linearly scanning it on every UDF invocation (i.e. every row).
  private val cityToRegion: Map[String, String] = Map(
    "东北" -> List("哈尔滨", "长春", "沈阳", "大连"),
    "华北" -> List("北京", "天津", "石家庄", "太原", "呼和浩特"),
    "华东" -> List("上海", "南京", "杭州", "合肥", "福州", "南昌", "济南", "青岛"),
    "华南" -> List("广州", "深圳", "南宁", "海口"),
    "西南" -> List("成都", "重庆", "贵阳", "昆明", "拉萨"),
    "西北" -> List("西安", "兰州", "西宁", "银川", "乌鲁木齐"),
    "华中" -> List("武汉", "长沙", "郑州")
  ).flatMap { case (region, cities) => cities.map(_ -> region) }

  // Region UDF: maps a city name to its geographic region.
  // null city -> "未知", unlisted city -> "其他".
  private val getRegionUDF = {
    // Bind to a local so the closure captures only the (serializable) Map,
    // not the enclosing object.
    val lookup = cityToRegion
    udf((city: String) => if (city == null) "未知" else lookup.getOrElse(city, "其他"))
  }

  // Season UDF: maps a month number (1-12) to a Chinese season label.
  // Uses java.lang.Integer so Spark can pass null for missing month values;
  // with a primitive Int the original `month == null` check was always false
  // (a primitive can never be null), making that branch dead code.
  private val getSeasonUDF = udf((month: Integer) =>
    if (month == null) "未知"
    else month.intValue match {
      case 12 | 1 | 2  => "冬季"
      case 3 | 4 | 5   => "春季"
      case 6 | 7 | 8   => "夏季"
      case 9 | 10 | 11 => "秋季"
      case _           => "未知"
    }
  )

  /**
   * 1. Hourly spatio-temporal hotspot analysis.
   *
   * Aggregates concentrations per (region, city, hour, date, pollutant),
   * then scores each group against the average of all groups sharing the
   * same pollutant and hour; a ratio above 1.5 flags the group as a hotspot.
   */
  def analyzeHourlyHotspot(df: DataFrame): DataFrame = {
    // Baseline: mean hourly concentration across all cities for the same pollutant/hour.
    val pollutantHourWindow = Window.partitionBy("type", "hour")

    val aggregated = df
      .groupBy("region", "city", "hour", "date_value", "type")
      .agg(
        avg("concentration_value").alias("avg_hourly_conc"),
        max("concentration_value").alias("max_hourly_conc"),
        count("*").alias("record_count")
      )

    aggregated
      .withColumn("hotspot_score",
        col("avg_hourly_conc") / avg("avg_hourly_conc").over(pollutantHourWindow))
      .withColumn("is_hotspot", when(col("hotspot_score") > 1.5, 1).otherwise(0))
      .orderBy("date_value", "hour", "hotspot_score")
  }

  /**
   * 2. Joint spatio-temporal trend analysis.
   *
   * Aggregates concentrations per (region, season, hour, pollutant) and adds
   * the coefficient of variation (stddev / mean, defined as 0 when the mean
   * is 0) as a dispersion measure.
   */
  def analyzeSpatioTemporalTrend(df: DataFrame): DataFrame = {
    val aggregated = df
      .groupBy("region", "season", "hour", "type")
      .agg(
        avg("concentration_value").alias("avg_conc"),
        stddev("concentration_value").alias("std_conc"),
        count("*").alias("sample_count")
      )

    // Coefficient of variation; guard against division by a zero mean.
    val withCv = aggregated.withColumn(
      "cv_conc",
      when(col("avg_conc") === 0, 0.0).otherwise(col("std_conc") / col("avg_conc"))
    )

    withCv.orderBy("region", "season", "hour", "type")
  }

  /**
   * Persists both analysis results to MySQL via JDBC.
   *
   * Each table is written in overwrite mode with TRUNCATE (keeps the table
   * definition instead of DROP/CREATE) and batched inserts. Failures are
   * logged, not rethrown, so the job can finish its console report.
   *
   * @param hourlyHotspot       result of [[analyzeHourlyHotspot]]
   * @param spatioTemporalTrend result of [[analyzeSpatioTemporalTrend]]
   */
  def writeToMySQL(
                    hourlyHotspot: DataFrame,
                    spatioTemporalTrend: DataFrame
                  ): Unit = {
    import scala.util.control.NonFatal

    val prop = SparkConfig.getMySQLProperties()
    val url = SparkConfig.getMySQLUrl()
    val dbName = url.split("/").last.split("\\?").head // database name from the JDBC URL

    // Batched inserts + TRUNCATE-on-overwrite
    val writeOptions = Map(
      "batchsize" -> "10000",
      "truncate" -> "true"
    )

    // Shared write path for both tables: log, count, write, confirm.
    // NOTE: count() evaluates the DataFrame once before the write does it again;
    // kept for parity with the original progress reporting.
    def writeTable(data: DataFrame, table: String): Unit = {
      println(s"开始写入${table}表...")
      println(s"${table}表将写入 ${data.count()} 条记录")
      data.write.mode("overwrite")
        .options(writeOptions)
        .jdbc(url, table, prop)
      println(s"${table}表写入完成")
    }

    try {
      println(s"准备写入MySQL数据库: $dbName")

      // Hourly hotspot table (rename `type` to the MySQL column name)
      writeTable(
        hourlyHotspot.select(
          col("region"),
          col("city"),
          col("hour"),
          col("date_value"),
          col("type").alias("pollutant_type"),
          col("avg_hourly_conc"),
          col("max_hourly_conc"),
          col("hotspot_score"),
          col("is_hotspot"),
          col("record_count")
        ),
        MYSQL_HOURLY_TABLE
      )

      // Spatio-temporal trend table
      writeTable(
        spatioTemporalTrend.select(
          col("region"),
          col("season"),
          col("hour"),
          col("type").alias("pollutant_type"),
          col("avg_conc"),
          col("std_conc"),
          col("sample_count"),
          col("cv_conc")
        ),
        MYSQL_TREND_TABLE
      )

      println("=== 所有数据已成功写入MySQL ===")
    } catch {
      // NonFatal: do not swallow OOM / interrupts
      case NonFatal(e) =>
        println(s"MySQL写入失败: ${e.getMessage}")
        println(s"请检查MySQL服务是否启动，连接地址是否正确: ${url.split("\\?")(0)}")
        e.printStackTrace()
    }
  }

  /**
   * Smoke-tests the Hive and MySQL connections before running the analyses.
   * Failures are logged but never rethrown: a missing Hive table triggers the
   * test-data fallback, and MySQL problems only affect the final write step.
   */
  def testConnections(spark: SparkSession): Unit = {
    import scala.util.control.NonFatal

    // Hive: list databases, switch to the target one and sample the table
    try {
      println("显示数据库:")
      spark.sql("SHOW DATABASES").show(truncate = false)

      try {
        spark.sql(s"USE $HIVE_DATABASE")
        println(s"已切换到Hive数据库: $HIVE_DATABASE")

        println("显示表:")
        spark.sql("SHOW TABLES").show(truncate = false)

        println("样例数据:")
        spark.sql(s"SELECT * FROM $HIVE_TABLE LIMIT 5").show()
      } catch {
        // exception detail intentionally unused; the message tells the operator what to check
        case NonFatal(_) =>
          println(s"数据库 $HIVE_DATABASE 或表 $HIVE_TABLE 不存在，将使用测试数据")
      }
    } catch {
      case NonFatal(e) =>
        println(s"Hive连接测试失败: ${e.getMessage}")
        println("将继续使用测试数据进行演示")
    }

    // MySQL: fetch a single row from information_schema to verify connectivity
    try {
      val mysqlUrl = SparkConfig.getMySQLUrl()
      val prop = SparkConfig.getMySQLProperties()

      spark.read.jdbc(mysqlUrl, "information_schema.tables", prop)
        .limit(1)
        .show(1)

      println("MySQL连接成功")
    } catch {
      case NonFatal(e) =>
        println(s"MySQL连接失败: ${e.getMessage}")
        println("请检查MySQL配置，否则分析结果无法写入")
    }
  }
}
