package scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.functions.avg
import org.apache.spark.sql.functions.count

import org.apache.spark.sql.catalyst.dsl.expressions.{DslExpression, StringToAttributeConversionHelper}
import java.util.logging.{Level, Logger}

object job1 {
  /**
   * Job 1: for each house-usage category (房屋用途), compute the number of
   * listings and the average unit price, then persist the result to MySQL.
   *
   * Standard JVM entry point so the object can be launched via
   * `spark-submit --class scala.job1 ...`. Command-line arguments are
   * currently unused.
   */
  def main(args: Array[String]): Unit = run()

  // Kept for source compatibility with any existing caller of `job1.main()`.
  def main(): Unit = run()

  /** Executes the read → aggregate → write pipeline. */
  private def run(): Unit = {
    // `SparkCreate` / `mysql` are project-local helpers holding the
    // SparkSession factory and the JDBC URL/connection properties.
    val sessionFactory = new SparkCreate
    val spark = sessionFactory.initializeSparkSession()

    try {
      // Source file is CSV-formatted despite the .txt extension; header row
      // supplies the (Chinese) column names. All columns load as strings —
      // Spark's `avg` implicitly casts the price column to double.
      val data = spark.read.format("csv")
        .option("header", "true")
        .load("hdfs://niit-master:9000/user/niit/Input/room.txt")

      // Per usage category: row count ("房屋数量") and mean unit price
      // ("平均单价"), ordered with the most common category first.
      val houseTypeStats = data.groupBy("房屋用途")
        .agg(
          count("*").alias("房屋数量"),
          avg("单价（元/平方米）").alias("平均单价")
        )
        .orderBy(col("房屋数量").desc)

      val mysql = new mysql
      houseTypeStats.write.mode("overwrite")
        .jdbc(mysql.URL, "sparkproject.job1", mysql.prpo)
    } finally {
      // Always release the SparkSession, even if the read/write fails —
      // the original skipped this on any exception.
      spark.stop()
    }
  }
}
