package com.tipdm.scalaDemo

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

// Model for one row of the house CSV file; the field order matches the CSV
// column order exactly (parsed positionally in main).
// NOTE(review): `sale_data` looks like a typo for `sale_date` — it holds a
// yyyyMMdd date string — but renaming it would change the DataFrame column
// name that downstream code references, so it is kept as-is.
case class House(
  selling_price: Double, bedrooms_num: Double, bathroom_num: Double, housing_area: Double,
  parking_area: Double, floor_num: Double, housing_rating: Double, built_area: Double,
  basement_area: Double, year_built: Int, year_repair: Int,
  latitude: Double, longitude: Double, sale_data: String)

object WordCount {

  // Reference year used to derive a house's age from year_built.
  private val ReferenceYear = 2020

  /** Prints the number of missing values in column `col` of `data`.
    *
    * Bug fix: the original computed `data.count() - data.na.drop().count()`.
    * `na.drop()` with no column list removes rows that are null in ANY
    * column, so every column reported the same whole-row missing count.
    * Restricting the drop to `Seq(col)` counts nulls in this column only.
    */
  def null_count(data: DataFrame, col: String): Unit = {
    println(col + " 缺失值数量：" + (data.count() - data.na.drop(Seq(col)).count()))
  }

  /** Prints max/min/mean/stddev of column `col`, then its missing count.
    *
    * The four statistics are computed in a single aggregation (one Spark job
    * instead of four separate actions) and collected to the driver with
    * `first()`, so the values are printed locally. The original
    * `foreach(println)` executed on the executors and its output is only
    * visible when running in local mode.
    */
  def max_min_mean_std(data: DataFrame, col: String): Unit = {
    println(col + ":")
    val stats = data.agg(
      max(data(col)).alias("max"),
      min(data(col)).alias("min"),
      mean(data(col)).alias("mean"),
      stddev(data(col)).alias("std")
    ).first()
    println("max: " + stats.get(0))
    println("min: " + stats.get(1))
    println("mean: " + stats.get(2))
    println("std: " + stats.get(3))
    null_count(data, col)
    println("—" * 20)
  }

  /** Buckets a house age in years: "old" (>= 30), "middle" (10..29),
    * "new" (< 10). Used through a UDF in `main`.
    */
  def AgeTitle(age: Int): String = {
    if (age >= 30) "old"
    else if (age >= 10) "middle"
    else "new"
  }

  def main(args: Array[String]): Unit = {
    // Silence Spark's internal logging BEFORE any context is created so the
    // startup messages are suppressed as well (the original set the level
    // after the context was already up).
    Logger.getLogger("org").setLevel(Level.OFF)

    // A SparkSession owns its SparkContext. The original additionally built
    // a standalone SparkContext from a separate SparkConf, which is redundant
    // and can fail with "Only one SparkContext may be running in this JVM".
    val spark = SparkSession.builder().appName("wordcount").master("local").getOrCreate()
    val sc = spark.sparkContext
    import spark.implicits._

    // Read the CSV and drop the header row (the first line of partition 0).
    val houseRDD = sc.textFile("C:\\Users\\admin\\Desktop\\house.csv")
      .mapPartitionsWithIndex((idx, iter) => if (idx == 0) iter.drop(1) else iter)

    // Parse each comma-separated line positionally into a House, then lift to
    // a DataFrame. NOTE(review): assumes every row has 14 well-formed fields;
    // a malformed line throws NumberFormatException on the executor.
    val houseDF = houseRDD.map(_.split(","))
      .map(x => House(
        x(0).trim.toDouble, x(1).trim.toDouble, x(2).trim.toDouble, x(3).trim.toDouble,
        x(4).trim.toDouble, x(5).trim.toDouble, x(6).trim.toDouble, x(7).trim.toDouble,
        x(8).trim.toDouble, x(9).trim.toInt, x(10).trim.toInt, x(11).trim.toDouble,
        x(12).trim.toDouble, x(13).trim
      )).toDF()

    // Basic per-column statistics. For the year columns and the date string
    // only the missing-value count is meaningful, so skip max/min/mean/std.
    houseDF.columns.foreach { colName =>
      if (colName == "year_built" || colName == "year_repair" || colName == "sale_data") {
        println(colName + ":")
        null_count(houseDF, colName)
        println("-" * 20)
      } else {
        max_min_mean_std(houseDF, colName)
      }
    }

    // Drop incomplete rows, parse the yyyyMMdd sale-date string into a real
    // date column, and derive the calendar quarter from it.
    val houseDate = houseDF.na.drop()
      .withColumn("date", to_date(col("sale_data"), "yyyyMMdd"))
    val houseQuarter = houseDate.withColumn("quarter", quarter(col("date")))

    // Sales count and total sales amount per quarter.
    println("各季度销售数量：")
    houseQuarter.groupBy("quarter").count().orderBy("quarter").show()

    println("各季度销售总金额：")
    houseQuarter.groupBy("quarter").sum("selling_price").orderBy("quarter").show()

    // Distribution of housing ratings, most frequent first.
    println("房屋评分分布：")
    houseQuarter.groupBy("housing_rating").count().orderBy(desc("count")).show()

    // Average unit price (selling price per unit of housing area) per rating.
    println("房屋评分与单位售价关系：")
    houseQuarter.groupBy("housing_rating")
      .agg(avg(col("selling_price") / col("housing_area")).alias("unit_price_avg"))
      .orderBy(desc("housing_rating"))
      .show()

    // Age analysis of repaired houses only (year_repair == 0 means never
    // repaired). ReferenceYear replaces the former magic constant 2020.
    val houseRepair = houseDF.filter("year_repair != 0")
      .withColumn("houseAge", lit(ReferenceYear) - col("year_built").cast(IntegerType))

    // Bucket the house age into old/middle/new via a UDF.
    val houseAgeTitleUDF = udf((age: Int) => AgeTitle(age))
    val houseAgeTitle = houseRepair.withColumn("houseAgeTitle", houseAgeTitleUDF(col("houseAge")))

    println("修缮房屋按房龄分类分布：")
    houseAgeTitle.select("houseAge", "houseAgeTitle").show()
    houseAgeTitle.groupBy("houseAgeTitle").count().show()
  }
}
