package com.scala.learn.project

import java.net.URL

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}


/**
  * 计算每个学科的前3名
  * 类似于二次排序
  *
  * 如果学科已经知道了
  */
object FavTeacher2 {

  /** Number of top teachers to keep per subject (the object doc promises top 3). */
  private val TopN = 3

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("FavTeacher")
    val sc = new SparkContext(conf)

    // Load raw lines, each a URL like: https://ke.qq.com/bigData/xiaowang/index.html
    val rdd1 = sc.textFile("D:\\tmp\\url")

    // Parse each URL into (subject, teacher).
    // Path shape is "/<subject>/<teacher>/index.html", so split("/") yields
    // ["", subject, teacher, ...] — indices 1 and 2.
    val subjectAndTeacher = rdd1.map { line =>
      val segments = new URL(line).getPath.split("/")
      (segments(1), segments(2))
    }

    val subPartitioner = new SubPartitioner()

    // Count occurrences of each (subject, teacher) pair and, in the same
    // shuffle, partition by subject so each subject lands in one partition.
    val reducedAndPartitioned: RDD[((String, String), Int)] =
      subjectAndTeacher.map((_, 1)).reduceByKey(subPartitioner, _ + _)

    // Within each partition (== one subject, thanks to the partitioner) keep
    // the TopN teachers by count, descending.
    // NOTE: toList materializes the whole partition in memory; acceptable only
    // while the number of teachers per subject stays small.
    val result: RDD[((String, String), Int)] =
      reducedAndPartitioned.mapPartitions(iter => iter.toList.sortBy(-_._2).take(TopN).iterator)

    result.saveAsTextFile("D:\\tmp\\log")

    sc.stop()
  }

  /**
    * Custom partitioner: routes each (subject, teacher) key to a partition
    * chosen by subject, so all records of one subject share a partition.
    *
    * The subject list is hard-coded here; it could instead be loaded from an
    * external system (e.g. a database) at driver startup.
    */
  class SubPartitioner extends Partitioner {
    // Known subjects -> partition index. Keys are lowercase; getPartition
    // normalizes before lookup (URLs contain e.g. "bigData").
    val rules2 = Map("bigdata" -> 1, "java" -> 2, "php" -> 3)

    // One partition per known subject plus partition 0 for unknown subjects.
    // (With rules2.size alone, "php" -> 3 would be out of range for 3
    // partitions, violating Spark's getPartition contract of [0, numPartitions).)
    override def numPartitions: Int = rules2.size + 1

    // Keys arriving from reduceByKey are (subject, teacher) pairs; partition
    // by the subject component. Unknown subjects and unexpected key shapes
    // fall back to partition 0.
    override def getPartition(key: Any): Int = key match {
      case (subject: String, _) => rules2.getOrElse(subject.toLowerCase, 0)
      case _                    => 0
    }
  }

}
