package com.spark.rdd

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Administrator on 2017/7/20.
  */
object sparkForeach {

  /**
    * Entry point: starts a local-mode SparkContext, repartitions a small list
    * of integers into 2 partitions, and prints each partition's contents.
    *
    * Demonstrates that a partition's `Iterator` must be materialized (here to
    * a `Seq`) before it can be traversed more than once.
    */
  def main(args: Array[String]): Unit = {

    // Windows-only workaround: point Spark/Hadoop at a local Hadoop install
    // so winutils.exe can be found.
    System.setProperty("hadoop.home.dir", "D:\\openSource\\hadoop-2.7.3\\")

    // Local-mode Spark configuration.
    // NOTE(review): spark.driver.memory has no effect when set here — the
    // driver JVM is already running. Pass it via spark-submit instead.
    val conf = new SparkConf()
      .setAppName("wordCount")
      .setMaster("local")
      .set("spark.driver.memory", "2G")

    // Create the Spark context.
    val sc = new SparkContext(conf)
    try {
      val data = sc.parallelize(List(1, 2, 3, 4, 5, 6, 76, 78, 0))
      // (removed a no-op `.map(x => x * 1)` — it had no observable effect)
      data.repartition(2).foreachPartition { partition =>
        println("------------------------------------------")
        // Materialize the iterator: an Iterator can be consumed only once,
        // but we print the elements and size four times below.
        val a = partition.toSeq
        println(a.mkString(";") + " a.size1=" + a.size)
        println(a.mkString(";") + " a.size2=" + a.size)
        println(a.mkString(";") + " a.size3=" + a.size)
        println(a.mkString(";") + " a.size4=" + a.size)
      }
    } finally {
      // Bug fix: the original never stopped the SparkContext, leaking its
      // resources (scheduler threads, UI port, temp directories).
      sc.stop()
    }
  }

}
