package scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{max, mean, min, rank}

import java.util.logging.{Level, Logger}

object job4 {

  /**
   * Standard JVM entry point so the job can be launched with `spark-submit`.
   * Command-line arguments are currently unused; delegates to the zero-arg
   * overload, which is kept for source compatibility with existing callers.
   */
  def main(args: Array[String]): Unit = main()

  /**
   * For every district (房屋所属市辖区), finds the listing(s) with the highest
   * unit price (单价, yuan per square metre) and overwrites the result into
   * the MySQL table `sparkproject.job4`.
   */
  def main(): Unit = {
    val sparkCreate = new SparkCreate
    val spark = sparkCreate.initializeSparkSession()
    import spark.implicits._

    // All CSV columns are read as strings: the header option is set but no
    // schema inference is requested.
    val houses = spark.read.format("csv")
      .option("header", "true")
      .load("hdfs://niit-master:9000/user/niit/Input/room.txt")

    // Cast the unit-price column to double before ordering. Without the cast
    // the column is a string and sorts lexicographically ("9" > "100"),
    // which would pick the wrong "highest price" rows.
    val windowSpec = Window
      .partitionBy("房屋所属市辖区")
      .orderBy($"单价（元/平方米）".cast("double").desc)

    // rank() gives 1 to every row tied for the top price, so ties are kept.
    val result = houses.withColumn("rank", rank().over(windowSpec))
      .where($"rank" === 1)
      .select("房屋所属市辖区", "房屋地址（街道）", "单价（元/平方米）")

    val mysql = new mysql
    result.write.mode("overwrite")
      .jdbc(mysql.URL, "sparkproject.job4", mysql.prpo)
    spark.stop()
  }
}
