package com.fwmagic.spark.core.cases.groupwithtopn

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ArrayBuffer

/**
  * Grouped TopN: for each subject, find the top-3 most-visited teachers.
  * 1. Count occurrences of each (subject, teacher) pair.
  * 2. Take the first N per subject.
  *
  * Variant 5: custom partitioner.
  * After aggregating, repartition with a custom partitioner (so each partition
  * holds exactly one subject's data) via a new ShuffledRDD, then sort the keys
  * with the specified ordering.
  */
object FavoriteTeacher7 {
    /**
      * Entry point.
      *
      * args(0): "true"/"false" — run with a local master
      * args(1): input path of the access log
      * args(2): N — how many top teachers to print per subject
      */
    def main(args: Array[String]): Unit = {
        val isLocal = args(0)

        val conf = new SparkConf().setAppName(this.getClass.getSimpleName)

        if (isLocal.toBoolean) {
            conf.setMaster("local[1]")
        }

        val sc = new SparkContext(conf)
        sc.setLogLevel("WARN")

        // Read the raw access-log lines.
        val lines: RDD[String] = sc.textFile(args(1))

        // Parse each URL into ((subject, teacher), 1).
        /* Example line: http://bigdata.fwmagic.com/huangzhong */
        val subjectTeacherAndOne: RDD[((String, String), Int)] = lines.map(line => {
            val words: Array[String] = line.split("/")
            // words(2) is the host ("bigdata.fwmagic.com"); its first dotted segment is the subject.
            val subject = words(2).split("\\.")(0)
            val teacher = words(3)
            ((subject, teacher), 1)
        })

        // Collect all distinct subjects to the driver so the partitioner knows them.
        val subjects: Array[String] = subjectTeacherAndOne.map(_._1._1).distinct().collect()

        // Aggregate the visit count per (subject, teacher).
        val reduced: RDD[((String, String), Int)] = subjectTeacherAndOne.reduceByKey(_ + _)

        // How many entries to keep per subject.
        val topN = args(2).toInt

        // Re-shape the data: move the count into the key so ShuffledRDD can sort by it.
        val mapedRDD: RDD[((String, String, Int), Null)] = reduced.map(t => ((t._1._1, t._1._2, t._2), null))

        // Descending order by count. Using .reverse instead of negating the value
        // (-_._3) avoids the Int.MinValue overflow that integer negation has.
        val sortRules: Ordering[(String, String, Int)] = Ordering[Int].on[(String, String, Int)](_._3).reverse

        // Custom partitioner created on the driver: one partition per subject.
        val subjectPartitioner = new SubjectPartitioner2(subjects)

        // Repartition so each partition holds exactly one subject, with keys sorted by count.
        val result: ShuffledRDD[(String, String, Int), Null, Nothing] = new ShuffledRDD(mapedRDD, subjectPartitioner).setKeyOrdering(sortRules)

        // Each partition is already sorted; take(topN) stops consuming the iterator
        // after N elements instead of scanning the whole partition with a counter var.
        result.foreachPartition(it => {
            it.take(topN).foreach(tp => println(tp._1))
        })

        sc.stop()
    }
}

/*
Expected results:
(javaee,machao,5)
(javaee,sunquan,5)
(javaee,zhangfei,3)
(javaee,sunshangxiang,2)
-- (javaee,caocao,1)

(bigdata,liubei,6)
(bigdata,guanyu,4)
(bigdata,zhaoyun,2)
-- (bigdata,huangzhong,1)

(python,lvbu,4)
(python,zhugeliang,3)
(python,liushan,2)
-- (python,diaochan,1)

Sample input data:
http://bigdata.fwmagic.com/huangzhong
http://bigdata.fwmagic.com/zhaoyun
http://bigdata.fwmagic.com/zhaoyun
http://bigdata.fwmagic.com/liubei
http://bigdata.fwmagic.com/liubei
http://bigdata.fwmagic.com/liubei
http://bigdata.fwmagic.com/liubei
http://bigdata.fwmagic.com/liubei
http://bigdata.fwmagic.com/liubei
http://bigdata.fwmagic.com/guanyu
http://bigdata.fwmagic.com/guanyu
http://bigdata.fwmagic.com/guanyu
http://bigdata.fwmagic.com/guanyu
http://javaee.fwmagic.com/zhangfei
http://javaee.fwmagic.com/zhangfei
http://javaee.fwmagic.com/zhangfei
http://javaee.fwmagic.com/machao
http://javaee.fwmagic.com/machao
http://javaee.fwmagic.com/machao
http://javaee.fwmagic.com/machao
http://javaee.fwmagic.com/machao
http://javaee.fwmagic.com/sunquan
http://javaee.fwmagic.com/sunquan
http://javaee.fwmagic.com/sunquan
http://javaee.fwmagic.com/sunquan
http://javaee.fwmagic.com/sunquan
http://javaee.fwmagic.com/caocao
http://javaee.fwmagic.com/sunshangxiang
http://javaee.fwmagic.com/sunshangxiang
http://python.fwmagic.com/lvbu
http://python.fwmagic.com/lvbu
http://python.fwmagic.com/lvbu
http://python.fwmagic.com/lvbu
http://python.fwmagic.com/zhugeliang
http://python.fwmagic.com/zhugeliang
http://python.fwmagic.com/zhugeliang
http://python.fwmagic.com/liushan
http://python.fwmagic.com/liushan
http://python.fwmagic.com/diaochan
*/