package org.whl.pollution.etl

import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{DoubleType, StringType}
import org.whl.util.spark.SparkUtil

/**
 * @author 王浩霖
 * @version 1.0.0 2024/12/25 17:58
 */
object PollutionEtl {
  /**
   * ETL entry point: reads the raw pollution CSV from HDFS, drops rows with
   * nulls in the required columns, casts columns to their proper types, and
   * overwrites the Hive table `wanghaolin.whl_pollution` with the result.
   */
  def main(args: Array[String]): Unit = {
    // Obtain the SparkSession (configured by the project's SparkUtil helper).
    val sparkSession = SparkUtil()

    // Path of the raw (ODS layer) source data on HDFS.
    val originPath: String = "/pollution-data/ods/pollution_dataset.csv"
    // Read the CSV (first line is the header) into a DataFrame; every column
    // is read as StringType at this stage.
    val df = sparkSession.read.option("header", "true").csv(originPath)

    // Columns that must be non-null for a row to be kept.
    val cols = Array("Temperature","Humidity","PM25","PM10","NO2","SO2","CO","Proximity_to_Industrial_Areas","Population_Density","Air_Quality")

    // Drop rows containing a null in any of the listed columns.
    // BUG FIX: DataFrames are immutable — the original code discarded the
    // result of na.drop, so no rows were actually filtered. Capture it and
    // build the typed table from the cleaned DataFrame.
    val cleaned = df.na.drop(cols)

    // Cast each column from String to its target type.
    val table = cleaned.select(
      col("Temperature") cast (DoubleType),
      col("Humidity") cast (DoubleType),
      col("PM25") cast (DoubleType),
      col("PM10") cast (DoubleType),
      col("NO2") cast (DoubleType),
      col("SO2") cast (DoubleType),
      col("CO") cast (DoubleType),
      col("Proximity_to_Industrial_Areas") cast (DoubleType),
      col("Population_Density") cast (DoubleType),
      col("Air_Quality") cast (StringType),
    )
    // Persist the cleaned, typed data to Hive, replacing any existing table.
    table.write.mode(SaveMode.Overwrite).saveAsTable("wanghaolin.whl_pollution")
    // Release resources.
    sparkSession.stop()
  }
}
