package org.whl.pollution.analysis

import org.apache.spark.sql.functions._
import org.apache.spark.sql.{Row, SparkSession}
import org.whl.util.spark.SparkUtil
/**
 * @author 王浩霖
 * @version 1.0.0 2024/12/25 20:55
 */
/**
 * Counts air-quality categories per temperature bucket and persists the
 * cross-tabulation to Hive table `wanghaolin.whl_tem_air`.
 *
 * Output schema: Temperature ("10~20" … "50~60") x counts of
 * Moderate / Good / Hazardous / Poor readings in that range.
 */
object TemAirquality {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil()
    import spark.implicits._

    // Load source readings from Hive.
    val df = spark.read.table("wanghaolin.whl_pollution")

    // Temperature buckets: [lo, hi) — lower bound inclusive, upper exclusive.
    val tempRanges = Array((10, 20), (20, 30), (30, 40), (40, 50), (50, 60))

    // Only these categories have a column in the result; anything else
    // (unexpected or null labels) is ignored instead of crashing the job.
    val knownQualities = Set("Moderate", "Good", "Hazardous", "Poor")

    // Result accumulator: one row per temperature bucket, all counts start at 0.
    var resultDF = spark.createDataFrame(Seq(
      ("10~20", 0, 0, 0, 0),
      ("20~30", 0, 0, 0, 0),
      ("30~40", 0, 0, 0, 0),
      ("40~50", 0, 0, 0, 0),
      ("50~60", 0, 0, 0, 0)
    )).toDF("Temperature", "Moderate", "Good", "Hazardous", "Poor")

    // For each bucket, count readings per air-quality category and fold the
    // counts into the matching row of the result.
    tempRanges.foreach { case (lo, hi) =>
      val label = s"$lo~$hi"
      val counts = df.filter($"Temperature" >= lo && $"Temperature" < hi)
        .groupBy("Air_Quality")
        .count()
        .collect()

      counts.foreach {
        case Row(quality: String, cnt: Long) if knownQualities.contains(quality) =>
          // BUG FIX: the original added cnt to EVERY row of the column
          // (withColumn applies across all rows), inflating every bucket.
          // Restrict the update to the row for this temperature range.
          resultDF = resultDF.withColumn(
            quality,
            when($"Temperature" === label, col(quality) + cnt.toInt)
              .otherwise(col(quality))
          )
        case _ =>
          // Unknown or null Air_Quality label — previously a MatchError; skip it.
      }
    }

    // Expose the result as a temp view so it can be materialized via SQL.
    resultDF.createOrReplaceTempView("temp_view")

    // CTAS into Hive; the DDL returns no rows, so no .collect() is needed.
    spark.sql("CREATE TABLE IF NOT EXISTS wanghaolin.whl_tem_air AS SELECT * FROM temp_view")

    spark.stop()
  }
}