package com.atbeijing.bigdata.spark.mytest.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}


object Operator1 {

  /**
   * Demonstrates repartitioning an RDD with `partitionBy`.
   *
   * RDD itself has no `partitionBy` method; it becomes available through the
   * implicit conversion to `PairRDDFunctions`, which requires the RDD to have
   * element type `(K, V)`. We therefore map each Int to a (Int, 1) pair first.
   *
   * Note: if both the partitioner class and the number of partitions are the
   * same as the RDD's current ones, `partitionBy` is a no-op and returns the
   * same RDD.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("groupBy")
    val sc = new SparkContext(conf)

    try {
      val list = List(1, 2, 3, 6, 7, 8)
      val r1: RDD[Int] = sc.makeRDD(list, 3)

      // Convert to key-value pairs so the implicit PairRDDFunctions
      // conversion applies and partitionBy can be called.
      val r2: RDD[(Int, Int)] = r1.map(s => (s, 1))

      // Redistribute into 2 partitions by key hash.
      val r3: RDD[(Int, Int)] = r2.partitionBy(new HashPartitioner(2))

      // NOTE: saveAsTextFile fails if the "outPut" directory already exists;
      // delete it between runs.
      r3.saveAsTextFile("outPut")
    } finally {
      // Always release the SparkContext; the original code leaked it.
      sc.stop()
    }
  }
}
