package cn.lhz.global.etl

import cn.lhz.util.spark.SparkUtil
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.IntegerType

/**
 * Cleans global.csv air-quality data: casts the numeric AQI columns to
 * integers, drops any row with a missing or unparseable value, and writes
 * the result to the Hive table `air.dwd_global`.
 *
 * @author 年阔正
 * @version 1.0.0
 */
object GlobalEtl {
  def main(args: Array[String]): Unit = {
    // Obtain the SparkSession (project helper; assumed Hive-enabled — confirm in SparkUtil)
    val sparkSession = SparkUtil()

    // Source CSV location on HDFS
    val originPath: String = "/air-data/ods/AirGlobal.csv"

    // Read the CSV with a header row; every column arrives as StringType
    val df = sparkSession.read.option("header", "true").csv(originPath)

    // All columns the cleaned table requires to be non-null
    val cols = Array("country", "city", "aqi_value", "aqi_category",
      "co_aqi_value", "co_aqi_category", "ozone_aqi_value", "ozone_aqi_category",
      "no2_aqi_value", "no2_aqi_category", "pm2_5_aqi_value", "pm2_5_aqi_category")

    // Cast the numeric AQI columns to IntegerType FIRST. In Spark, casting a
    // non-numeric string to IntegerType yields null rather than failing, so
    // performing the cast before the null filter lets na.drop below also
    // discard rows with malformed numbers. (Filtering before the cast — the
    // previous order — let such rows reach the Hive table as nulls.)
    val typed = df.select(
      col("country"),
      col("city"),
      col("aqi_value").cast(IntegerType),
      col("aqi_category"),
      col("co_aqi_value").cast(IntegerType),
      col("co_aqi_category"),
      col("ozone_aqi_value").cast(IntegerType),
      col("ozone_aqi_category"),
      col("no2_aqi_value").cast(IntegerType),
      col("no2_aqi_category"),
      col("pm2_5_aqi_value").cast(IntegerType),
      col("pm2_5_aqi_category")
    )

    // Drop every row with a null in any required column — covers both values
    // missing in the source and values that failed the integer cast above
    val table = typed.na.drop(cols)

    // Overwrite the target Hive table with the cleaned, typed result
    table.write.mode(SaveMode.Overwrite).saveAsTable("air.dwd_global")

    // Release Spark resources
    sparkSession.stop()
  }
}