package com.atguigu1.core.io

import java.util.Date

import org.apache.hadoop.mapred.lib.HashPartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
 *
 * @description: 缓存案例
 * @time: 2021-03-12 11:45
 * @author: baojinlong
 **/
object Spark01DiyPartition {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("persistDemo")
    // 设置rdd分区数字
    val sparkContext = new SparkContext(conf)
    val rdd: RDD[(String, String)] = sparkContext.makeRDD(Seq(
      ("nba", "xxxx"),
      ("cba", "xxxx"),
      ("wba", "xxxx"),
      ("xba", "xxxx")
    ), 3)

    rdd.partitionBy(new MyPartitioner).saveAsTextFile("xxxks")
    sparkContext.stop
  }


}

class MyPartitioner extends Partitioner {
  // 分区的数量=3
  override def numPartitions: Int = 3

  /**
   * 针对key,value的类型,只有Key-Value类型的RDD才有分区器的，非Key-Value类型的RDD分区器的值是None
   *
   * @param key
   * @return
   */
  override def getPartition(key: Any): Int = {
    key match {
      case "nba" => 0
      case "cba" => 1
      case _ => 2
    }
  }

}