package com.chinasoft.scala

import java.sql.Timestamp

import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.util.control.NonFatal

/** Data ETL job: imports raw air-quality readings from HDFS (or from a MySQL
  * staging table) into a Hive table. This is the bridge between the raw data
  * and the downstream analysis jobs.
  */
object DataETL {

  // Configuration constants.
  private val HDFS_PATH = "/user/airquality/preprocessed_data/output.csv"
  private val HIVE_DATABASE = "airtest"
  private val HIVE_TABLE = "air_test"
  private val MYSQL_TEMP_TABLE = "chinaCities2025"

  /** Entry point. Pass "hdfs" as the first argument to read the CSV directly
    * from HDFS; any other invocation reads the MySQL staging table (the
    * recommended path).
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkConfig.getSparkSession("DataETL")

    // Compute an exit code instead of swallowing the failure.
    // BUGFIX: the previous version caught the exception, printed it, and fell
    // through to a normal exit — the JVM returned status 0 on failure, so
    // schedulers (cron/Oozie/Airflow) reported failed ETL runs as successful.
    val exitCode =
      try {
        println("=== 开始数据ETL处理 ===")

        if (args.length > 0 && args(0) == "hdfs") {
          // Option 1: read the CSV directly from HDFS.
          println("使用HDFS数据源")
          loadFromHDFS(spark)
        } else {
          // Option 2: read from the MySQL staging table (recommended).
          println("使用MySQL临时表数据源")
          loadFromMySQL(spark)
        }

        println("=== 数据ETL完成 ===")
        0
      } catch {
        // NonFatal: let OutOfMemoryError / InterruptedException propagate.
        case NonFatal(e) =>
          println(s"ETL过程中出现错误: ${e.getMessage}")
          e.printStackTrace()
          1
      } finally {
        spark.stop()
      }

    if (exitCode != 0) sys.exit(exitCode)
  }

  /** Option 1: read the raw CSV file from HDFS and load it into Hive.
    *
    * The CSV is headerless; the schema below mirrors the column layout
    * produced by the upstream preprocessing step (per HdfsApi.java).
    *
    * @throws Exception rethrown after logging when the HDFS read fails
    */
  def loadFromHDFS(spark: SparkSession): Unit = {
    println("=== 从HDFS读取原始CSV数据 ===")

    val csvSchema = StructType(
      Array(
        StructField("date_value", StringType, true),
        StructField("hour", IntegerType, true),
        StructField("type", StringType, true),
        StructField("city", StringType, true),
        StructField("concentration_value", DoubleType, true)
      )
    )

    try {
      // An explicit schema is supplied, so inference is disabled — this
      // avoids an extra pass over the file just to guess column types.
      val rawDF = spark.read
        .option("header", "false")
        .option("inferSchema", "false")
        .schema(csvSchema)
        .csv(HDFS_PATH)
        // cache(): count(), show() and the transform below would otherwise
        // each re-read the file from HDFS.
        .cache()

      println(s"从HDFS读取到 ${rawDF.count()} 条记录")
      rawDF.show(10)

      transformAndInsertToHive(spark, rawDF)
      rawDF.unpersist()

    } catch {
      case NonFatal(e) =>
        println(s"从HDFS读取数据失败: ${e.getMessage}")
        println("提示：请确保HDFS路径存在且可访问")
        throw e
    }
  }

  /** Option 2 (recommended): read from the MySQL staging table and load into
    * Hive. Columns are renamed to match the Hive layout expected by
    * [[transformAndInsertToHive]].
    *
    * @throws Exception rethrown after logging when the JDBC read fails
    */
  def loadFromMySQL(spark: SparkSession): Unit = {
    println("=== 从MySQL临时表读取数据 ===")

    val mysqlUrl = SparkConfig.getMySQLUrl()
    val mysqlProps = SparkConfig.getMySQLProperties()

    try {
      // cache(): count(), show() and the transform below would otherwise
      // each issue a separate query against MySQL.
      val mysqlDF = spark.read
        .jdbc(mysqlUrl, MYSQL_TEMP_TABLE, mysqlProps)
        .cache()

      println(s"从MySQL读取到 ${mysqlDF.count()} 条记录")
      mysqlDF.show(10)

      // Rename "date" to "date_value" to match the downstream schema.
      val renamedDF = mysqlDF.select(
        col("date").alias("date_value"),
        col("hour"),
        col("type"),
        col("city"),
        col("concentration_value")
      )

      transformAndInsertToHive(spark, renamedDF)
      mysqlDF.unpersist()

    } catch {
      case NonFatal(e) =>
        println(s"从MySQL读取数据失败: ${e.getMessage}")
        println("提示：请先运行 DealHiveResult.java 导入原始数据到MySQL")
        throw e
    }
  }

  /** Transforms/cleans the source rows and inserts them into the Hive table.
    *
    * @param spark    active session (must be able to reach the Hive metastore)
    * @param sourceDF input with columns: date_value, hour, type, city,
    *                 concentration_value
    */
  def transformAndInsertToHive(
      spark: SparkSession,
      sourceDF: DataFrame
  ): Unit = {
    println("=== 开始数据转换和清洗 ===")

    // cache(): finalDF is consumed three times (show, count, INSERT); without
    // caching the whole transformation would be recomputed for each action.
    val finalDF = enrichAirQualityData(sourceDF).cache()

    println("=== 数据转换完成，样例数据：===")
    finalDF.show(5, truncate = false)
    println(s"转换后数据总数: ${finalDF.count()}")

    writeToHive(spark, finalDF)
    finalDF.unpersist()
  }

  /** Derives the full Hive row layout from the raw five-column feed: station
    * placeholders, a province lookup keyed on the city name, date parts, a
    * measurement unit, a coarse AQI bucket, and simulated weather columns.
    * Rows with null/non-positive concentrations or a missing type/city are
    * dropped. The final select fixes the column order expected by the Hive
    * table (positional INSERT).
    */
  private def enrichAirQualityData(sourceDF: DataFrame): DataFrame = {
    val enrichedDF = sourceDF
      // Basic cleansing: keep only positive readings with type and city set.
      .filter(
        col("concentration_value").isNotNull && col("concentration_value") > 0
      )
      .filter(col("type").isNotNull && col("city").isNotNull)
      // Station metadata is not present in the feed; generate placeholders.
      .withColumn("station_code", lit("AUTO_GEN"))
      .withColumn("station_name", concat(col("city"), lit("监测站")))
      // Minimal city→province lookup; unmatched cities fall through.
      .withColumn(
        "province",
        when(col("city").rlike("北京"), "北京市")
          .when(col("city").rlike("上海"), "上海市")
          .when(col("city").rlike("广州|深圳"), "广东省")
          .when(col("city").rlike("成都"), "四川省")
          .when(col("city").rlike("武汉"), "湖北省")
          .otherwise("未知省份")
      )
      // Date parts by fixed offsets — assumes date_value is "yyyy-MM-dd"
      // (consistent with the to_date format used below).
      .withColumn("year", substring(col("date_value"), 1, 4).cast(IntegerType))
      .withColumn("month", substring(col("date_value"), 6, 2).cast(IntegerType))
      .withColumn("day", substring(col("date_value"), 9, 2).cast(IntegerType))
      .withColumn(
        "unit",
        when(col("type").isin("PM2.5", "PM10", "SO2", "NO2"), "μg/m³")
          .when(col("type") === "CO", "mg/m³")
          .when(col("type") === "O3", "μg/m³")
          .otherwise("μg/m³")
      )
      // NOTE(review): coarse AQI bucketing (50/100/150 only, PM2.5/PM10
      // thresholds), not the official breakpoint interpolation — confirm this
      // is intentional for the analysis downstream.
      .withColumn(
        "aqi",
        when(col("type") === "PM2.5" && col("concentration_value") <= 35, 50)
          .when(
            col("type") === "PM2.5" && col("concentration_value") <= 75,
            100
          )
          .when(col("type") === "PM10" && col("concentration_value") <= 50, 50)
          .when(
            col("type") === "PM10" && col("concentration_value") <= 150,
            100
          )
          .otherwise(150)
      )
      .withColumn(
        "aqi_level",
        when(col("aqi") <= 50, "优")
          .when(col("aqi") <= 100, "良")
          .when(col("aqi") <= 150, "轻度污染")
          .otherwise("中度污染")
      )
      .withColumn("primary_pollutant", col("type"))
      // Weather columns are simulated with rand() — not real observations.
      .withColumn("temperature", lit(20.0) + (rand() * lit(15.0))) // 20-35 °C
      .withColumn("humidity", lit(40.0) + (rand() * lit(40.0))) // 40-80 %
      .withColumn("wind_speed", lit(1.0) + (rand() * lit(10.0))) // 1-11 m/s
      .withColumn("wind_direction", lit("北风"))
      .withColumn("pressure", lit(1013.0) + (rand() * lit(50.0))) // 1013-1063 hPa
      .withColumn("data_source", lit("ETL_IMPORT"))
      .withColumn("create_time", current_timestamp())
      .withColumn("update_time", current_timestamp())
      .withColumn("data_date", to_date(col("date_value"), "yyyy-MM-dd"))

    // Final column order must match the Hive table definition, since the
    // INSERT below is positional (SELECT *).
    enrichedDF.select(
      "station_code",
      "station_name",
      "city",
      "province",
      "date_value",
      "year",
      "month",
      "day",
      "hour",
      "type",
      "concentration_value",
      "unit",
      "aqi",
      "aqi_level",
      "primary_pollutant",
      "temperature",
      "humidity",
      "wind_speed",
      "wind_direction",
      "pressure",
      "data_source",
      "create_time",
      "update_time",
      "data_date"
    )
  }

  /** Overwrites the Hive table with the transformed rows, then prints the
    * resulting row count and a small sample for verification.
    */
  private def writeToHive(spark: SparkSession, finalDF: DataFrame): Unit = {
    spark.sql(s"USE $HIVE_DATABASE")

    // Expose the DataFrame to Spark SQL for the INSERT below.
    finalDF.createOrReplaceTempView("temp_air_data")

    println("=== 开始插入数据到Hive表 ===")
    spark.sql(s"""
      INSERT OVERWRITE TABLE $HIVE_TABLE
      SELECT * FROM temp_air_data
    """)

    // Verify the insert by counting what landed in the table.
    val resultCount = spark
      .sql(s"SELECT COUNT(*) as count FROM $HIVE_TABLE")
      .collect()(0)
      .getLong(0)
    println(s"=== 成功插入 $resultCount 条数据到 $HIVE_DATABASE.$HIVE_TABLE ===")

    println("=== Hive表数据样例 ===")
    spark.sql(s"SELECT * FROM $HIVE_TABLE LIMIT 5").show(truncate = false)
  }
}
