package hello
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.log4j.{Level, Logger}
/** Minimal Spark example demonstrating the `map` transformation on a local RDD. */
object _02SparkTransformationOps {

  def main(args: Array[String]): Unit = {
    // getSimpleName on a Scala object's class carries a trailing "$"
    // (e.g. "_02SparkTransformationOps$"); stripSuffix removes it so the
    // Spark UI shows a clean application name.
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName(getClass.getSimpleName.stripSuffix("$"))
    // Silence Spark's verbose INFO logging so the example's output is readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    val sc = new SparkContext(conf)
    transformationOps1(sc)
    sc.stop()
  }

  /** Multiplies each element of a local list by 10 via the `map`
    * transformation and prints the results from the workers.
    *
    * @param sc active SparkContext used to build the RDD
    */
  def transformationOps1(sc: SparkContext): Unit = {
    val list = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    // Copies the local collection into a distributed dataset that can be
    // operated on in parallel. parallelize itself preserves element order
    // within the RDD; it is the per-partition, parallel execution of
    // foreach below that makes the printed output order nondeterministic.
    val listRDD = sc.parallelize(list)
    val retRDD = listRDD.map(_ * 10)
    retRDD.foreach(println)
  }

}