package edu.csl.study.spark.basic

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Core_RDD_Parallelism_Local {

  /**
   * Build a local-mode SparkContext for this demo.
   *
   * Master is `local[*]`, i.e. one worker thread per available CPU core
   * (`Runtime.getRuntime.availableProcessors()`); `local[N]` would pin it to N.
   * NOTE: the original comment claimed the thread count must be > 1 so one
   * thread can "receive data" — that requirement applies to Spark *Streaming*
   * receivers, not to core RDD jobs like this one.
   *
   * @return a started SparkContext with log level lowered to WARN
   */
  def newContext: SparkContext = {
    val conf: SparkConf = new SparkConf()
      .setAppName("WordCount")
      .setMaster("local[*]")
    // FIX: the key was misspelled as "spark.defalut.parallelism", so Spark
    // silently ignored it and the setting never took effect.
    conf.set("spark.default.parallelism", "2")
    val sc = new SparkContext(conf)
    sc.setLogLevel("warn") // accepted case-insensitively by SparkContext.setLogLevel
    sc
  }

  // Directory holding the demo input files.
  // FIX: use the platform separator instead of the hard-coded Windows "\\"
  // so the demo also runs on Linux/macOS (identical value on Windows).
  val rootDir: String =
    System.getProperty("user.dir") + java.io.File.separator + "testFile" + java.io.File.separator

  /**
   * Word-count driver that prints partition counts at each stage and
   * demonstrates that an accumulator used inside a transformation is
   * re-applied every time an action re-executes the lineage.
   */
  def main(args: Array[String]): Unit = {
    // 1. Configuration / context.
    val sc = newContext

    // Number of cores on this machine (what local[*] allocates).
    def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
    println("本机核数：" + localCpuCount)

    // 2. Input: one element per text line.
    val rdd: RDD[String] = sc.textFile(rootDir + "words.txt")
    println("rdd.partitions.length = " + rdd.partitions.length)

    // Split each line into words; the println shows which worker thread
    // processes each element.
    val rdd1: RDD[String] = rdd.flatMap { line =>
      println(Thread.currentThread().getName + "-map处理：" + line)
      line.split(" ")
    }
    val rdd1_map = rdd1.map(word => (word, 1))
    println("rdd1_map.partitions.length = " + rdd1_map.partitions.length)

    // 3. Reduce by key, counting reduce invocations with an accumulator.
    // WARNING: incrementing an accumulator inside a transformation means it
    // is re-counted whenever the lineage is recomputed by another action —
    // that effect is demonstrated deliberately below.
    val accumulator = sc.longAccumulator("count")
    val rdd1_reduce: RDD[(String, Int)] = rdd1_map.reduceByKey { (x, y) =>
      println(Thread.currentThread().getName + "-reduceByKey :" + x + "->" + y)
      accumulator.add(1)
      x + y
    }

    println("rdd1_reduce.partitions.length  1= " + rdd1_reduce.partitions.length)
    val rdd2 = rdd1_reduce.map((_, 1))
    rdd2.collect().foreach(println)
    println("rdd1_reduce.partitions.length 2= " + rdd1_reduce.partitions.length)
    println("accumulator1 = " + accumulator.value)
    // Second collect() intentionally re-runs the lineage (rdd2 is not cached),
    // so accumulator2 below prints roughly double accumulator1.
    println("---words---结果：" + rdd2.collect().mkString(","))
    println("rdd1_reduce.partitions.length 3= " + rdd1_reduce.partitions.length)
    println("accumulator2 = " + accumulator.value)

    // Additional experiments kept for reference: mapPartitionsWithIndex shows
    // per-partition processing; intersection finds common elements.
    //
    //     val rdd2 = sc.parallelize(List(5, 6, 4, 3))
    //     val rdd3 = sc.makeRDD(List(1, 2, 3, 4))
    //     val rdd21 = rdd2.mapPartitionsWithIndex((index, iter) => {
    //       iter.map(x => {
    //         println("第" + index + "个分区数据： " + x)
    //         x * 10
    //       })
    //     })
    //     println("---按分区mapPartitionsWithIndex--partitions.length = " + rdd2.partitions.length)
    //     println("---按分区mapPartitionsWithIndex---结果：" + rdd21.collect().mkString(","))
    //     println("---交集intersection---partitions.length = " + rdd2.partitions.length)
    //     println("---交集intersection-- -结果：" + rdd2.intersection(rdd3).collect().mkString(","))

    // Shut down the context.
    sc.stop()
  }

}
