package com.dxf.bigdata.D05_spark_again

import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
import org.apache.spark.{HashPartitioner, Partitioner, SparkConf, SparkContext}

/**
 * Demonstrates a custom Spark partitioner.
 *
 * `partitionBy` applies a per-key routing rule (rule "a" within a partition,
 * rule "b" across partitions can differ), whereas `reduceByKey` applies the
 * same aggregation rule "c" both within and between partitions.
 */
object 自定义分区器 {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("app")

    // Retry the driver/UI port many times so concurrent local runs don't collide.
    sparkConf.set("spark.port.maxRetries", "100")
    val sc = new SparkContext(sparkConf)

    // Sample key/value pairs spread over 3 initial partitions.
    val pairs: RDD[(String, String)] =
      sc.makeRDD(List(("a", "1"), ("dxf", "2"), ("c", "3"), ("a", "4")), 3)

    // Reshuffle the data according to MyPartitioner's per-key routing.
    val repartitioned: RDD[(String, String)] = pairs.partitionBy(new MyPartitioner)

    // NOTE(review): saveAsTextFile fails if "output2" already exists — remove it between runs.
    repartitioned.saveAsTextFile("output2")
    sc.stop()
  }

  /**
   * Routes keys to fixed partition indices: null -> 0, "dxf" -> 1,
   * "xxx" -> 4, everything else -> 2. Declares 5 partitions, so some
   * output partitions (e.g. 3) will simply be empty.
   */
  class MyPartitioner extends Partitioner {
    override def numPartitions: Int = 5

    /**
     * Maps a key to its target partition index.
     *
     * `numPartitions` must be >= the number of distinct ids this method can
     * return; e.g. if getPartition yields 4 distinct ids while numPartitions
     * is 2, the job aborts with:
     * Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times,
     * most recent failure: Lost task 0.0 in stage 0.0 (TID 0, LAPTOP-HOURFJAG, executor driver)
     *
     * @param key the record key (may be null)
     * @return the target partition index, in [0, numPartitions)
     */
    override def getPartition(key: Any): Int = key match {
      case null  => 0
      case "dxf" => 1
      case "xxx" => 4
      case _     => 2
    }
  }

}



