package scala

import javafx.beans.binding.Bindings.select
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

import java.util.logging.{Level, Logger}

object job5 {

  /**
   * JVM/spark-submit entry point.
   *
   * Reads the housing-listing CSV from HDFS, computes per-district unit-price
   * statistics (min / max / mean price per m² and mean floor area), orders the
   * result by average unit price ascending, and overwrites the
   * `sparkproject.job5` table in MySQL via JDBC.
   *
   * Fixes over the previous revision:
   *  - takes `args: Array[String]` so the launcher can actually invoke it
   *    (the old zero-arg `main()` was not a valid JVM entry point);
   *  - enables `inferSchema` so price/area columns load as numerics — without
   *    it `min`/`max` compared strings lexicographically (e.g. "9" > "100");
   *  - `spark.stop()` now runs in `finally` so the session is released even
   *    when the read or the JDBC write fails.
   *
   * @param args command-line arguments (currently unused)
   */
  def main(args: Array[String]): Unit = {
    val sparkCreate = new SparkCreate
    val spark = sparkCreate.initializeSparkSession()
    try {
      // The input is CSV-formatted despite the .txt extension.
      // inferSchema makes numeric columns numeric, so the aggregations below
      // use numeric (not lexicographic) ordering.
      val data = spark.read.format("csv")
        .option("header", "true")
        .option("inferSchema", "true")
        .load("hdfs://niit-master:9000/user/niit/Input/room.txt")

      // Per-district stats, ordered by average unit price ascending.
      val priceAndAreaByDistrict = data
        .groupBy("房屋所属市辖区")
        .agg(
          min("单价（元/平方米）").alias("最低单价"),
          max("单价（元/平方米）").alias("最高单价"),
          mean("单价（元/平方米）").alias("平均单价"),
          mean("建筑面积（平方米）").alias("平均建筑面积")
        )
        .orderBy("平均单价")
        .select("房屋所属市辖区", "最低单价", "最高单价", "平均单价", "平均建筑面积")
        .coalesce(1) // single partition -> one JDBC writer task, small result set

      // NOTE(review): `mysql.prpo` looks like a typo for "props"/"prop" —
      // defined elsewhere in the project, so left as-is here.
      val mysql = new mysql
      priceAndAreaByDistrict.write.mode("overwrite")
        .jdbc(mysql.URL, "sparkproject.job5", mysql.prpo)
    } finally {
      // Always release the Spark session, even on failure.
      spark.stop()
    }
  }

  /** Backward-compatible overload preserving the original no-arg signature. */
  def main(): Unit = main(Array.empty[String])
}
