package com.atguigu.bigdata.spark

import org.apache.spark.{SparkConf, SparkContext}


object Spark02_Oper3 {

  /** Demonstrates the `mapPartitionsWithIndex` RDD operator: pairs every
    * element of an RDD with the index of the partition that holds it,
    * then collects and prints the result on the driver.
    */
  def main(args: Array[String]): Unit = {
    // Configure the Spark runtime: local mode using all available cores.
    // App name matches this object (was "wordCount" — a copy-paste leftover).
    val config: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Spark02_Oper3")

    // Driver-side entry point to the Spark cluster.
    val sc = new SparkContext(config)

    // Distribute the numbers 1..10 across exactly 2 partitions so the
    // partition index in the output is meaningful.
    val listRDD = sc.makeRDD(1 to 10, 2)

    // mapPartitionsWithIndex invokes the function once per partition with
    // (partitionIndex, iteratorOfElements); here each element is tagged
    // with a label naming its partition. "分区号" means "partition number"
    // and is part of the program's printed output, so it is kept verbatim.
    val indexRDD = listRDD.mapPartitionsWithIndex((index, items) => items.map(("分区号" + index, _)))

    indexRDD.collect().foreach(println)

    // Stop the context to release executor/driver resources; the original
    // omitted this, leaking the SparkContext on exit.
    sc.stop()
  }
}
