package com.shujia.core

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
  * Demonstrates RDD partition inspection, repartitioning and a custom partitioner.
  *
  * Reads local text files, shows how `repartition` / `coalesce` change the
  * partition count, then writes students grouped by class using [[MyPartition]].
  */
object Demo17Partition {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("app").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      val rdd = sc.textFile("data/part")

      // Number of partitions of the source RDD (driven by input splits).
      val partition = rdd.getNumPartitions
      println("rdd1 " + partition)

      // Increase partitions (repartition always shuffles).
      // Default hash partitioning, e.g. "001".hashCode % 10
      val rdd2 = rdd.repartition(10)

      println("rdd2 " + rdd2.getNumPartitions)

      // coalesce with shuffle = true behaves like repartition.
      val rdd3 = rdd.coalesce(10, shuffle = true)
      println("rdd3 " + rdd3.getNumPartitions)

      // Decreasing partitions can avoid a shuffle entirely.
      val rdd4 = rdd.coalesce(1, shuffle = false)
      println("rdd4 " + rdd4.getNumPartitions)

      /**
        * Increasing the number of partitions requires a shuffle.
        * Decreasing the number of partitions can be done without one.
        *
        * .coalesce(100, shuffle = false) is a common way to merge small files.
        */

      /**
        * Custom partitioning example.
        */

      val studentRDD = sc.textFile("data/student.txt")

      // Produce one output file per class: key each line by its class column
      // (index 4 of the comma-separated record) and partition by class name.
      studentRDD
        .map(line => {
          val clazz = line.split(",")(4)
          (clazz, line)
        }).partitionBy(new MyPartition(12))
        .saveAsTextFile("data/out")
    } finally {
      // Release the SparkContext even if a job above fails.
      sc.stop()
    }
  }
}

/**
  * 创建自定义分区器
  *
  */
/**
  * Custom partitioner that routes records to a fixed partition per class name.
  *
  * Intended to be constructed with 12 partitions (six liberal-arts classes,
  * six science classes). Unknown keys fall back to partition 0.
  *
  * @param partition total number of partitions
  */
class MyPartition(partition: Int) extends Partitioner {

  // Fixed class-name -> partition index mapping.
  private val classIndex: Map[Any, Int] = Map(
    "文科一班" -> 0, "文科二班" -> 1, "文科三班" -> 2,
    "文科四班" -> 3, "文科五班" -> 4, "文科六班" -> 5,
    "理科一班" -> 6, "理科二班" -> 7, "理科三班" -> 8,
    "理科四班" -> 9, "理科五班" -> 10, "理科六班" -> 11
  )

  // Total number of partitions produced by this partitioner.
  override def numPartitions: Int = partition

  /**
    * Map a key (the class name) to its partition index.
    *
    * The result is clamped with modulo so a partitioner constructed with
    * fewer than 12 partitions can never return an out-of-range index
    * (the original hard-coded match could). Unknown keys go to partition 0.
    */
  override def getPartition(clazz: Any): Int =
    classIndex.getOrElse(clazz, 0) % numPartitions

  // Override equals/hashCode so Spark can detect that two RDDs are
  // co-partitioned by an equivalent partitioner and skip redundant shuffles.
  override def equals(other: Any): Boolean = other match {
    case p: MyPartition => p.numPartitions == numPartitions
    case _ => false
  }

  override def hashCode: Int = numPartitions
}