package core_sql.day02.homework

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Requirements:
  *   1. Find the most-liked teacher across the whole site.
  *   2. Find the most-liked teacher within each subject.
  *
  *   Implement in at least three different ways.
  */
object Teacher {

  /**
    * Entry point: reads the teacher access log, counts how many times each
    * teacher appears, and writes the counts ranked from most to least liked.
    *
    * Assumes each input line is a URL whose 4th "/"-separated segment
    * (index 3) is the teacher name, e.g. http://subject.site.cn/teacher —
    * TODO confirm against the actual log format.
    */
  def main(args: Array[String]): Unit = {
    // Most-liked teacher across the whole site.
    val conf: SparkConf = new SparkConf()
      .setAppName("teacher")
      .setMaster("local[*]")

    val sc: SparkContext = new SparkContext(conf)

    try {
      val file: RDD[String] = sc.textFile("src/main/file/teacher.log")

      // Emit one (teacher, 1) pair per log line.
      val teacherOne: RDD[(String, Int)] = file.map { line =>
        val split: Array[String] = line.split("/")
        (split(3), 1)
      }

      val teacherCount: RDD[(String, Int)] = teacherOne.reduceByKey(_ + _)

      // Collapse to a single partition so the sort below produces one
      // globally ordered output file (not a per-partition "local" sort).
      val coalesced: RDD[(String, Int)] = teacherCount.coalesce(1)

      // BUG FIX: the original sorted ascending, which puts the most-liked
      // teacher LAST; a "most liked" ranking must be descending by count.
      val sortedByCount: RDD[(String, Int)] =
        coalesced.sortBy(_._2, ascending = false)

      // NOTE(review): saveAsTextFile throws if the output directory already
      // exists — delete "E://tmp/teacher" between runs.
      sortedByCount.saveAsTextFile("E://tmp/teacher")
      sortedByCount.foreach(println(_))
    } finally {
      // BUG FIX: the original never released the SparkContext; without
      // sc.stop() the local cluster's resources are leaked on exit.
      sc.stop()
    }
  }

}
