package edu.csl.study.spark.basic

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Core_RDD_Parallelism_Local2 {

  /**
   * Creates a local SparkContext for this demo.
   *
   * NOTE: the "master must be at least local[2]" rule (one thread to receive
   * data, the rest to process it) applies to Spark *Streaming* receivers.
   * For a plain RDD job like this one, local[*] simply uses all available
   * cores (Runtime.getRuntime.availableProcessors()).
   *
   * @return a started SparkContext with the log level lowered to WARN
   */
  def newContext: SparkContext = {
    val conf: SparkConf = new SparkConf().setAppName("WordCount").setMaster("local[*]")
    // Uncomment to pin the default parallelism instead of deriving it from core count.
    // (Original had a typo in the key: "spark.defalut.parallelism".)
    // conf.set("spark.default.parallelism", "2")
    val sc = new SparkContext(conf)
    sc.setLogLevel("warn")
    sc
  }

  // Input directory. File.separator keeps the path portable; the original
  // hard-coded Windows-style "\\" separators, which breaks on Linux/macOS.
  val rootDir: String =
    System.getProperty("user.dir") + java.io.File.separator + "testFile" + java.io.File.separator

  def main(args: Array[String]): Unit = {
    // 1. Configuration / context.
    val sc = newContext

    // Number of cores on this machine (4 on the author's box).
    def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
    println("本机核数：" + localCpuCount)

    try {
      // 2. Input: read the word file, requesting a minimum of 2 partitions.
      val rdd: RDD[String] = sc.textFile(rootDir + "words.txt", 2)
      println("textFile.partitions = " + rdd.partitions.length)

      // Split each line into words; the printed thread name shows which
      // task/partition each line is processed on.
      val rdd0: RDD[String] = rdd.flatMap { x =>
        println(Thread.currentThread().getName + "-flatMap处理：" + x)
        x.split(" ")
      }

      val count = rdd0.count()
      println("count = " + count)
    } finally {
      // Always release the context, even if the job above throws.
      sc.stop()
    }
  }

}
