package com.guchenbo.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}

/**
 * A source RDD (no parent dependencies) that generates one string element
 * per partition, tagged with the partition's index.
 *
 * @param sc        the SparkContext this RDD is attached to
 * @param numSlices number of partitions to create
 */
class GcbRDD(sc: SparkContext, numSlices: Int) extends RDD[String](sc, Nil) {

  /**
   * Produces the data for one partition: a single string of the form
   * `test-<partitionIndex>`.
   *
   * @param split   the partition being computed
   * @param context task context (unused here)
   * @return an iterator over exactly one generated string
   */
  override def compute(split: Partition, context: TaskContext): Iterator[String] = {
    println("get data")
    // Immutable binding: the value is never reassigned, so `val`, not `var`.
    val str = s"test-${split.index}"
    // Iterator.single is the idiomatic way to build a one-element iterator.
    Iterator.single(str)
  }

  /**
   * Builds the partition array: one [[GcbRDDPartition]] per index in
   * `0 until numSlices`.
   */
  override protected def getPartitions: Array[Partition] =
    Array.tabulate[Partition](numSlices)(new GcbRDDPartition(_))
}

object GcbRDD {

  /**
   * Factory for [[GcbRDD]].
   *
   * @param sc        the SparkContext to attach the new RDD to
   * @param numSlices number of partitions the RDD will have
   * @return a new [[GcbRDD]] with `numSlices` partitions
   */
  def rdd(sc: SparkContext, numSlices: Int): GcbRDD = {

    /** Reads `spark.default.parallelism` from the conf, defaulting to 2. */
    def defaultParallelism(): Int =
      sc.getConf.getInt("spark.default.parallelism", 2)

    // Explicit `()` — auto-application of a ()-declared method is deprecated
    // in Scala 2.13 and an error in Scala 3.
    // NOTE(review): this value is only printed, never used to size the RDD —
    // confirm whether numSlices was meant to fall back to it.
    println(defaultParallelism())
    new GcbRDD(sc, numSlices)
  }

}

/** A partition of [[GcbRDD]], identified solely by its position in the RDD. */
class GcbRDDPartition(idx: Int) extends Partition {
  // Stable, immutable identity — a val override of Partition.index suffices.
  override val index: Int = idx
}
