package com.pw.study.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Transformation1 {
  // NOTE(review): the SparkContext is built at object initialization, so it is
  // created as soon as the object is first referenced — acceptable for a local
  // study app, but worth knowing when adding tests around this object.
  val conf = new SparkConf()
    .setAppName("spark")
    .setMaster("local[4]")
    .set("spark.testing.memory", "4718592000")
  val sc = new SparkContext(conf)
  sc.setLogLevel("error")

  /**
   * Demonstrates `glom`: collapses the elements of each partition into an Array,
   * yielding one array per partition (here 2 partitions over 1..5).
   */
  def m1(): Unit = {
    val rdd: RDD[Int] = sc.makeRDD(1 to 5, 2)
    // Fix: println on an Array prints its identity hash (e.g. "[I@1a2b3c"), not
    // its contents. Collect to the driver and render each partition's array with
    // mkString so the output is actually readable. (collect also guarantees the
    // printing happens on the driver rather than on executors.)
    rdd.glom().collect().foreach(partition => println(partition.mkString("[", ",", "]")))
  }

  /**
   * Demonstrates `takeOrdered`: returns the 2 smallest elements of the RDD
   * in ascending order, as a driver-side Array.
   */
  def m2(): Unit = {
    val rdd: RDD[Int] = sc.makeRDD(1 to 5)
    val smallest: Array[Int] = rdd.takeOrdered(2)
    smallest.foreach(println)
  }

  def main(args: Array[String]): Unit = {
    m2()
    val rdd1 = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9))
    // toDebugString renders the RDD's lineage (its chain of dependencies).
    println("rdd1的血缘关系：" + rdd1.toDebugString)
    val rdd2 = rdd1.map(x => ("xx", x))

    println(rdd2.toDebugString)

    println("--------------------------------华丽的分割线--------------------------------")

    // Fix: stop the SparkContext so local resources (threads, UI port, temp
    // dirs) are released instead of leaking when main returns.
    sc.stop()
  }

}
