package cn.lecosa.spark
//val lines = sc.textFile("hdfs://park01:9000/home/broadcast.txt", 2)
package mypro


import java.net.URL

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created by 166 on 2017/9/5.
 */
object FavTeacher {

  /**
   * Computes the two most-favored teachers per subject.
   *
   * Expects `args(0)` to be a path (local or HDFS) to a text file where each
   * line is a URL of the form `http://<subject>.<domain>/<teacher>`: the first
   * label of the host is the subject and the URL path (minus the leading "/")
   * is the teacher. Prints the per-subject top-2 `((subject, teacher), count)`
   * entries to stdout.
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage hint instead of an IndexOutOfBoundsException.
    require(args.nonEmpty, "usage: FavTeacher <input-path>")

    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    // local[2] runs with two worker threads; local[*] would use all available cores.
    val conf = new SparkConf().setAppName(this.getClass.getName).setMaster("local[2]")
    val sc = new SparkContext(conf)

    try {
      // Read the raw input lines.
      val lines: RDD[String] = sc.textFile(args(0))

      // Parse each URL into a (subject, teacher) pair.
      // NOTE: this was previously stubbed out with empty strings, which made
      // every line collapse into ("",""); the intended parsing is restored.
      val subjectAndTeacher: RDD[(String, String)] = lines.map { line =>
        val url = new URL(line)
        val host = url.getHost
        val subject = host.substring(0, host.indexOf("."))
        val teacher = url.getPath.substring(1) // drop the leading "/"
        (subject, teacher)
      }

      // Count occurrences of each (subject, teacher) pair.
      val reduced = subjectAndTeacher.map((_, 1)).reduceByKey(_ + _)

      // Group by subject. The grouped Iterable cannot be sorted directly,
      // so each group is converted to a List below.
      val grouped: RDD[(String, Iterable[((String, String), Int)])] =
        reduced.groupBy(_._1._1)

      // Per subject, keep the two entries with the highest counts
      // (sorted on the driver side with plain Scala collections).
      val result: RDD[(String, List[((String, String), Int)])] =
        grouped.mapValues(_.toList.sortBy(_._2).reverse.take(2))

      val arr: Array[(String, List[((String, String), Int)])] = result.collect()
      println(arr.toBuffer)
    } finally {
      sc.stop() // release Spark resources even if the job fails
    }
  }
}