package com.inspur2

import org.apache.spark.sql.SparkSession

/** One NBA finals record parsed from the cleaned CSV: season year, finals
  * date, champion team, series score, and runner-up team. Marked `final`
  * per Scala best practice for case classes (no case-to-case inheritance).
  * NOTE(review): name stays lowercase `nba` to avoid breaking existing
  * references; conventional Scala would call it `Nba`.
  */
final case class nba(year: String, date: String, champion: String, score: String, runnerUp: String)

/** Championship-count statistics implemented with Spark SQL.
  *
  * Reads the cleaned finals data from HDFS, counts how many titles each
  * champion team has won, and writes the counts (descending) back to HDFS
  * as a single CSV file.
  */
object ChampionsNumber2 {

  def main(args: Array[String]): Unit = {
    val sess = SparkSession.builder()
      .appName("Champions count2")
      .master("local")
      .getOrCreate()

    try {
      import sess.implicits._

      // Parse each cleaned CSV line into a typed record. Rows with fewer
      // than the 5 expected fields are dropped instead of throwing an
      // ArrayIndexOutOfBoundsException mid-job.
      val champions = sess.read.textFile("hdfs://192.168.66.88:8020/0622/clear_data/part-00000")
        .map(_.split(","))
        .filter(_.length >= 5)
        .map(x => nba(x(0), x(1), x(2), x(3), x(4)))

      champions.createOrReplaceTempView("champions")

      // Count titles per champion, most-decorated first. The original query
      // wrapped the aggregation in an unaliased FROM-clause subquery, which
      // Spark's SQL parser rejects; a flat GROUP BY ... ORDER BY is
      // equivalent and valid.
      sess
        .sql("select champion, count(1) as champion_number from champions " +
          "group by champion order by champion_number desc")
        // coalesce(1) merges partitions without the full shuffle that
        // repartition(1) would trigger; we only want a single output file.
        .coalesce(1)
        .write.csv("hdfs://192.168.66.88:8020/0622/nba_champions_number_sparkSQL")
    } finally {
      // Release the local Spark context even if the job fails.
      sess.stop()
    }
  }

}
