package com.atguigu.bigdata.spark

import org.apache.spark.{Partitioner,SparkConf, SparkContext}

// Demo: repartition a pair RDD using a user-defined Partitioner so that
// every record lands in the first partition.
object Spark02_Oper11 {

  def main(args: Array[String]): Unit = {
    // Run locally, using as many worker threads as there are logical cores.
    val config: SparkConf = new SparkConf().setMaster("local[*]").setAppName("wordCount")
    // Spark context — the entry point for creating RDDs.
    val sc = new SparkContext(config)

    val listRDD = sc.makeRDD(List(("a", 1), ("b", 2), ("c", 3)))
    // Repartition into 3 partitions; myPartitioner routes every key to
    // partition 0, so all data is concentrated in the first partition
    // (the other two partitions stay empty).
    val partRDD = listRDD.partitionBy(new myPartitioner(3))
    // NOTE(review): saveAsTextFile fails if "output" already exists — delete
    // the directory between runs.
    partRDD.saveAsTextFile("output")

    // Release resources; the original never stopped the context.
    sc.stop()
  }

  // Custom partitioner that places every record in partition 0.
  // @param partitions total number of partitions to create (must be >= 1)
  class myPartitioner(partitions: Int) extends Partitioner {

    require(partitions >= 1, s"Number of partitions ($partitions) must be at least 1")

    override def numPartitions: Int = partitions

    // Bug fix: the original returned 1 — that is the *second* partition
    // (indices are 0-based), contradicting the stated intent of sending all
    // data to the first partition, and it is out of range when
    // partitions == 1. Returning 0 matches the intent and is always valid.
    override def getPartition(key: Any): Int = 0
  }
}
