package core_sql.day03

import java.net.URL

import util.FileUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
  * Created by root on 2019/3/10.
  * Groups access-log records by subject and computes the top-N most popular
  * teachers within each subject group.
  * Notes: `sortBy` / `sortByKey` perform a global sort across the whole RDD;
  * here a custom partitioner places each subject in its own partition so the
  * top-N can be computed per partition instead.
  */
object SubjectFavTeacher {
  /**
    * Computes the top-N teachers per subject from an access log.
    *
    * Each log line is expected to be a URL such as
    * `http://bigdata.example.cn/laozhang`, where the first host label is the
    * subject and the last path segment is the teacher.
    *
    * @param args optional first argument: how many top teachers to keep per
    *             subject (defaults to 3, preserving the original behavior)
    */
  def main(args: Array[String]): Unit = {
    // How many teachers to keep per subject; defaults to the original hard-coded 3.
    val topN: Int = if (args.nonEmpty) args(0).toInt else 3

    val conf: SparkConf = new SparkConf().setAppName("subjectTeacher").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)

    val lines: RDD[String] = sc.textFile(FileUtil.TEACHER_LOG)

    // Parse each URL into ((subject, teacher), 1).
    val subjectTeacherAndOne: RDD[((String, String), Int)] = lines.map { line =>
      // Teacher name is the last path segment of the URL.
      val teacher = line.substring(line.lastIndexOf("/") + 1)
      // Subject is the first label of the host name (e.g. "bigdata" from "bigdata.example.cn").
      val host = new URL(line).getHost
      val subject: String = host.substring(0, host.indexOf("."))
      ((subject, teacher), 1)
    }

    // Aggregate click counts per (subject, teacher).
    val reduced: RDD[((String, String), Int)] = subjectTeacherAndOne.reduceByKey(_ + _)

    // Repartition so that all records of one subject land in one partition,
    // and each partition holds exactly one subject.
    val partitioned: RDD[((String, String), Int)] = reduced.partitionBy(new SubjectPartitioner)

    // Top-N per partition (= per subject). Sorting descending directly avoids
    // the extra full-list reversal of sortBy(...).reverse.
    // NOTE(review): .toList materializes a whole partition in memory — fine here
    // because each partition holds one subject's (teacher, count) pairs.
    val result: RDD[((String, String), Int)] =
      partitioned.mapPartitions(_.toList.sortBy(-_._2).take(topN).iterator)

    // collect() first so the output is printed on the driver; a bare
    // result.foreach(println) would print on the executors when run on a cluster.
    result.collect().foreach(println)
    sc.stop()
  }
}

/**
  * Routes records keyed by (subject, teacher) so that each known subject gets
  * its own dedicated partition.
  */
class SubjectPartitioner extends Partitioner {
  // Partitioning rule: subject name -> partition id.
  val rules = Map("bigdata" -> 0, "javaee" -> 1, "php" -> 2, "python" -> 3)

  override def numPartitions: Int = 4

  /**
    * Returns the partition id for a key of shape (subject, teacher),
    * e.g. ("php", "laoli").
    *
    * Uses a pattern match instead of the deprecated `Pair` alias and an
    * unchecked `asInstanceOf` cast. A `Partitioner` must always return a value
    * in [0, numPartitions); `rules(subject)` would throw
    * NoSuchElementException for an unknown subject and abort the job, so
    * unknown subjects (and unexpected key shapes) fall back to partition 0.
    */
  override def getPartition(key: Any): Int = key match {
    case (subject: String, _) => rules.getOrElse(subject, 0)
    case _                    => 0
  }
}
