package scala
import org.apache.spark.sql.functions.col
import org.apache.log4j.{Level, Logger}
//Share of listings for each combination of "building structure" (建筑结构) and "elevator equipped" (配备电梯)
object job2 {
  /**
   * Entry point. Reads the listings CSV from HDFS, counts rows per
   * ("建筑结构", "配备电梯") group, derives each group's share of the
   * total row count ("占比"), sorts by descending count, and overwrites
   * the `sparkproject.job2` MySQL table with the result.
   *
   * Fix: the original signature was `def main(): Unit`, which is not a
   * valid JVM entry point — spark-submit requires `main(args: Array[String])`.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
//    Logger.getLogger("org").setLevel(Level.OFF)
    val sparkCreate = new SparkCreate
    val spark = sparkCreate.initializeSparkSession()

    // Source file carries a header row; every column is read as a string.
    val data = spark.read.format("csv")
      .option("header", "true")
      .load("hdfs://niit-master:9000/user/niit/Input/room.txt")

    // Count the total once up front. The original embedded `data.count()`
    // inside the column expression, which re-scanned the input a second
    // time when the write action ran.
    val totalRows = data.count()

    val elevatorStats = data
      .groupBy("建筑结构", "配备电梯")
      .count()
      .withColumn("占比", col("count") / totalRows)
      .orderBy(col("count").desc)
      .select("建筑结构", "配备电梯", "占比")
      .coalesce(1) // single partition -> one JDBC writer task / one output chunk

    // Connection details come from the project-local `mysql` helper
    // (URL + properties) — NOTE(review): `prpo` looks like a typo for
    // `props`, but it is declared elsewhere; left as-is.
    val mysql = new mysql
    elevatorStats.write.mode("overwrite")
      .jdbc(mysql.URL, "sparkproject.job2", mysql.prpo)
    spark.stop()
  }
}
