package hello
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.log4j.{Level, Logger}
/**
 * Small Spark demo: builds a local RDD from a Scala range and shows the
 * `sample` transformation (sampling without replacement).
 */
object sample {
  def main(args: Array[String]): Unit = {
    // BUGFIX: the app name previously referenced `_02SparkTransformationOps`,
    // a class not defined in this file (copy-paste leftover) — the file did
    // not compile. Derive the app name from this object instead.
    val conf = new SparkConf().setMaster("local[2]").setAppName(sample.getClass.getSimpleName)
    // Silence Spark's verbose logging so the sampled output is readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    val sc = new SparkContext(conf)
    transformationOps4(sc)
    sc.stop()
  }

  /**
   * Demonstrates the `sample` transformation on an RDD.
   *
   * @param sc an active SparkContext used to parallelize the source collection
   */
  def transformationOps4(sc: SparkContext): Unit = {
    val list = 1 to 1000
    // parallelize copies the local Scala collection into a distributed
    // dataset that can be operated on in parallel.
    val listRDD = sc.parallelize(list)
    // Sample ~20% of the elements WITHOUT replacement. Note: the fraction is
    // a per-element probability, so the resulting count is approximate.
    val sampleRDD = listRDD.sample(withReplacement = false, fraction = 0.2)

    // Print the sampled elements (order is nondeterministic across partitions).
    sampleRDD.foreach(num => print(num + " "))
    println()
    println("sampleRDD count: " + sampleRDD.count())
    // A second independent sample generally yields a different count,
    // illustrating that sampling is probabilistic.
    println("Another sampleRDD count: " + sc.parallelize(list).sample(false, 0.2).count())
  }
}