package com.atbeijing.bigdata.spark.core.rdd.part

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

object Spark01_RDD_Part {

    def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setMaster("local").setAppName("Partitioner")
        val sc = new SparkContext(conf)

        // Custom partitioner: we decide ourselves which partition each
        // key/value pair is stored in, instead of relying on HashPartitioner.
        val rdd = sc.makeRDD(
            List(
                ("nba", "xxxx"),
                ("cba", "xxxx"),
                ("cba", "xxxx"),
                ("cba", "xxxx"),
                ("nba", "xxxx"),
                ("wnba", "xxxx"),
            ), 3)
        // Goal: partition 0 -> nba, partition 1 -> cba, partition 2 -> wnba
        val rdd1 = rdd.partitionBy(new MyPartitioner())
        rdd1.saveAsTextFile("output")

        sc.stop()
    }

    /**
     * Custom partitioner.
     *
     * 1. Extend the abstract class [[org.apache.spark.Partitioner]].
     * 2. Override `numPartitions` and `getPartition`.
     */
    class MyPartitioner extends Partitioner {
        // 3 partitions, so valid partition ids are 0, 1, 2.
        override def numPartitions: Int = 3

        /**
         * Return the partition id (0-based) for a record's key.
         *
         * Bug fix: the original match was non-exhaustive — any key other than
         * "nba"/"cba"/"wnba" threw a scala.MatchError inside the Spark task.
         * Unknown keys now fall back to partition 0.
         */
        override def getPartition(key: Any): Int = {
            key match {
                case "nba"  => 0
                case "cba"  => 1
                case "wnba" => 2
                case _      => 0
            }
        }

        // Spark uses equals() to decide whether two RDDs are already partitioned
        // the same way and a shuffle can be skipped. Without this override,
        // every `new MyPartitioner()` compares unequal (reference equality),
        // forcing unnecessary reshuffles.
        override def equals(other: Any): Boolean = other.isInstanceOf[MyPartitioner]

        override def hashCode(): Int = numPartitions
    }
}
