package com._51doit.spark02

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

//全局的TopN
// Global "Top N" job: counts how many times each (teacher, subject) pair
// appears in an access log, sorts by count descending, and prints the result.
object AllWorkTest {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
    val sc: SparkContext = new SparkContext(conf)

    try {
      // Allow the input path to be supplied on the command line; fall back to
      // the original hard-coded path so existing invocations keep working.
      val inputPath: String =
        if (args.nonEmpty) args(0) else "D:\\07spark\\spark-day02\\work\\teacher.log"

      val lines: RDD[String] = sc.textFile(inputPath)

      // Splitting on one or more '/' turns e.g. "http://subject.xxx.cn/teacher"
      // into Array("http:", "subject.xxx.cn", "teacher"), so index 1 is the
      // host and index 2 the teacher name.
      // NOTE(review): assumes every line matches that URL shape — TODO confirm
      // against the actual log; a malformed line would throw here.
      val pairs: RDD[((String, String), Int)] = lines.map { line =>
        val parts: Array[String] = line.split("/+")
        val teacherName: String = parts(2)
        // The first dotted segment of the host is taken as the subject.
        val subject: String = parts(1).split("\\.")(0)
        // Emit a (key, 1) tuple keyed by (teacher, subject) for counting.
        ((teacherName, subject), 1)
      }

      // Group and aggregate: total occurrences per (teacher, subject) key.
      val counted: RDD[((String, String), Int)] = pairs.reduceByKey(_ + _)

      // Sort by count descending, then collect the results to the driver.
      val sorted: Array[((String, String), Int)] =
        counted.sortBy(_._2, ascending = false).collect()

      sorted.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails
      // (the original code never called stop()).
      sc.stop()
    }
  }
}
