package core_sql.day03

import java.net.URL

import util.FileUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

import scala.collection.mutable

/**
  * Created by root on 2019/3/10.
  * The partitioning rule is supplied by a custom partitioner, which optimizes
  * the job by reducing the number of shuffles (the reduce and the per-subject
  * grouping share a single shuffle).
  */
object SubjectFavTeacher3 {

  /**
    * Computes the top-3 most-visited teachers per subject.
    *
    * Each input log line is a URL of the form
    * `http://<subject>.<domain>/<teacher>`. A custom partitioner is passed to
    * `reduceByKey` so that aggregation and the per-subject grouping happen in
    * the same (single) shuffle; the top-N is then computed independently
    * inside each partition.
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("subjectTeacher").setMaster("local[*]")

    val sc: SparkContext = new SparkContext(conf)

    val lines: RDD[String] = sc.textFile(FileUtil.TEACHER_LOG)

    // Parse each log line into ((subject, teacher), 1).
    val subjectTeacherAndOne: RDD[((String, String), Int)] = lines.map { line =>
      // Teacher is the path segment after the last '/'.
      val teacher = line.substring(line.lastIndexOf("/") + 1)
      // Subject is the first label of the host name.
      val host = new URL(line).getHost
      val subject = host.substring(0, host.indexOf("."))
      ((subject, teacher), 1)
    }

    // Collect the distinct subjects up-front. This triggers a separate job
    // (an action); the driver needs the result to build the partitioner
    // before the code below runs.
    val subjects: Array[String] = subjectTeacherAndOne.map(_._1._1).distinct().collect()

    // Custom partitioner: all records of one subject land in the same
    // partition, and each non-fallback partition holds exactly one subject.
    val subPartitioner: SubjectPartitioner_v2 = new SubjectPartitioner_v2(subjects)

    // Aggregate counts, shuffling with the custom partitioner so the
    // per-subject layout is established during this single shuffle.
    val reduced: RDD[((String, String), Int)] = subjectTeacherAndOne.reduceByKey(subPartitioner, _ + _)

    // Top-3 per partition (== per subject, by construction). Sorting
    // descending directly avoids building and reversing the full sorted list.
    val result: RDD[((String, String), Int)] =
      reduced.mapPartitions(_.toList.sortBy(-_._2).take(3).iterator)

    // NOTE: with a non-local master this would print on the executors,
    // not the driver; fine here because the master is local[*].
    result.foreach(println)

    sc.stop()
  }

  /**
    * Custom partitioner mapping each known subject to its own partition.
    *
    * @param subs the distinct subjects collected on the driver; each gets a
    *             dedicated partition id starting at 1. Partition 0 is a
    *             fallback for subjects not present in `subs`.
    */
  class SubjectPartitioner_v2(val subs: Array[String]) extends Partitioner {
    // Subject -> partition id (1-based). Built immutably; replaces the
    // original mutable HashMap + var counter loop.
    private val rules: Map[String, Int] =
      subs.zipWithIndex.map { case (subject, idx) => subject -> (idx + 1) }.toMap

    // One partition per subject, plus the fallback partition 0.
    override def numPartitions: Int = subs.length + 1

    override def getPartition(key: Any): Int = {
      // Keys are (subject, teacher) tuples; route by subject only.
      // Cast to Tuple2 — the `Pair` alias used originally was deprecated in
      // Scala 2.11 and removed in 2.13.
      val subject: String = key.asInstanceOf[(String, String)]._1
      rules.getOrElse(subject, 0)
    }
  }

}
