package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo4MapPartition {

  /**
    * Demonstrates partition-level transformations on an RDD:
    *
    *  - `mapPartitions`: the supplied function is invoked once per partition
    *    and must return an `Iterator` (or collection) of output elements,
    *    instead of once per element as with `map`.
    *  - `mapPartitionsWithIndex`: same as above, but the function additionally
    *    receives the partition's index as its first argument.
    *
    * Reads lines from `spark/data/words`, splits them on commas via
    * `mapPartitions`, and prints each line together with its partition index
    * via `mapPartitionsWithIndex`.
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("map")

    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always stopped, even if the job throws;
    // otherwise driver-side resources (threads, UI server) leak.
    try {
      val rdd1: RDD[String] = sc.textFile("spark/data/words")

      val rdd2: RDD[String] = rdd1.mapPartitions((iter: Iterator[String]) => {
        // Executed once per partition, not once per element.
        println("一个分区")
        iter.flatMap(line => line.split(","))
      })

      //rdd2.foreach(println)

      val rdd3: RDD[String] = rdd1.mapPartitionsWithIndex((index: Int, iter: Iterator[String]) => {
        // `index` is the zero-based partition number.
        println("分区编号：" + index)
        iter
      })

      rdd3.foreach(println)
    } finally {
      sc.stop()
    }
  }

}
