package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

object Code28Partitioner extends App {

  /**
   * Partitioning operations:
   * 1. Without a shuffle:
   *    1.1 coalesce can reduce the partition count with no shuffle
   *    1.2 a minimum partition count can be given when reading; the actual
   *        read partitioning is determined by the input splits
   * 2. With a shuffle:
   *    2.1 coalesce with its shuffle flag set to true
   *    2.2 repartition (implemented on top of coalesce)
   *    2.3 spark.default.parallelism changes the shuffle partition count  <- important
   *    2.4 shuffle operators such as groupBy accept an explicit partition count
   *    2.5 shuffle operators such as groupBy accept a custom Partitioner
   *
   * Partition-count priority:
   * custom Partitioner on the operator > explicit count on the operator
   * > spark.default.parallelism > default behavior
   */

  private val conf: SparkConf = new SparkConf()
  conf.setAppName("WordCount")
  conf.setMaster("local")
  // Lowest-priority way to control shuffle partitioning: default parallelism = 3.
  conf.set("spark.default.parallelism", "3")

  private val sc = new SparkContext(conf)

  // Goal: sort each class's total scores and write one output file per class.

  // (id, (name, age, gender, clazz))
  // NOTE(review): this path uses "scala_code/..." while the score file below
  // uses "spark_code/..." — confirm which project directory is correct.
  val stuInfoRDD: RDD[(String, (String, String, String, String))] = sc
    .textFile("scala_code/data/students.txt")
    .map { oneLine =>
      val fields: Array[String] = oneLine.split(",")
      (fields(0), (fields(1), fields(2), fields(3), fields(4)))
    }

  // (id, totalScore): sum all of a student's scores; read with >= 4 partitions.
  private val totalScoreRDD: RDD[(String, Int)] = sc
    .textFile("spark_code/data/score.txt", 4)
    .map { oneLine =>
      val fields: Array[String] = oneLine.split(",")
      (fields(0), fields(2).toInt)
    }
    .groupBy(_._1)
    .mapValues(_.map(_._2).sum)

  // (id, (totalScore, (name, age, gender, clazz)))
  private val joinRes: RDD[(String, (Int, (String, String, String, String)))] = totalScoreRDD
    .join(stuInfoRDD)

  joinRes
    // def groupBy[K](f: T => K, p: Partitioner): group by class name and route
    // each class to its own partition so every class lands in its own file.
    .groupBy(
      (x: (String, (Int, (String, String, String, String)))) => x._2._2._4,
      new Partitioner() {
        override def numPartitions: Int = 12

        // Map a class-name key to its partition index.
        override def getPartition(key: Any): Int = {
          key.toString match {
            case "文科一班" => 0
            case "文科二班" => 1
            case "文科三班" => 2
            case "文科四班" => 3
            case "文科五班" => 4
            case "文科六班" => 5
            case "理科一班" => 6
            case "理科二班" => 7
            case "理科三班" => 8
            case "理科四班" => 9
            case "理科五班" => 10
            case "理科六班" => 11
            // FIX: the original match was non-exhaustive — any unexpected class
            // name threw a MatchError and failed the task. Fall back to 0.
            case _ => 0
          }
        }
      }
    )
    // Sort each class's students by total score, descending.
    .mapValues(_.toList.sortBy(-_._2._1))
    .flatMap(_._2)
    .saveAsTextFile("spark_code/output/partitioner")

  // FIX: release local Spark resources once the job has finished.
  sc.stop()
}

/** Custom partitioner that sends each class (clazz) to a fixed partition. */
class clazzPartitioner extends Partitioner {
  // Total number of partitions: one per class.
  override def numPartitions: Int = 12

  /** Map a class-name key to its partition index. */
  override def getPartition(key: Any): Int = {
    key.toString match {
      case "文科一班" => 0
      case "文科二班" => 1
      case "文科三班" => 2
      case "文科四班" => 3
      case "文科五班" => 4
      case "文科六班" => 5
      case "理科一班" => 6
      case "理科二班" => 7
      case "理科三班" => 8
      case "理科四班" => 9
      case "理科五班" => 10
      case "理科六班" => 11
      // FIX: the original match was non-exhaustive — an unrecognized class
      // name threw a MatchError at runtime. Route unknown keys to partition 0.
      case _ => 0
    }
  }
}
