package com.jinghang.spark_base._010_RDD

import org.apache.spark.{SparkConf, SparkContext}

object _010CreateRDD {
  // NOTE: with local[4] vs local[1] the printed output order differs — more
  // threads means partitions are processed concurrently, so ordering is not stable.
  // (Master is currently local[8]; adjust to observe the effect.)
  val conf = new SparkConf()
    .setAppName("appName")  // Application name, used to identify this app in the Spark UI
    .setMaster("local[8]")  // "local" = run in-process; [n] = use n threads (cores) to execute tasks
    //.set("spark.testing.memory", "471859200")

  private val sparkContext = new SparkContext(conf)
  sparkContext.setLogLevel("Error")


  def main(args: Array[String]): Unit = {
    try {
      map()
      //wcStudy()
    } finally {
      // Always stop the context so executor threads and resources are released
      // and the JVM can exit cleanly.
      sparkContext.stop()
    }
  }

  /**
    * Creates an RDD from an in-memory collection and adds 1 to every element.
    */
  def map(): Unit = {
    val data = Array(1, 2, 3, 4, 5)
    val rdd = sparkContext.parallelize(data)
    // map: applied to every element of the RDD; x is one element.
    // Remember: don't think locally — the data is partitioned across the
    // memory of different machines in the cluster.
    val rdd2 = rdd.map(x => x + 1)
    rdd2.collect().foreach(println)
  }

  /**
    * Creates an RDD from an external data source and prints each line's length.
    * Expected file contents, e.g.:
    *   Michael, 29
    *   Andy, 30
    *   Justin, 19
    *   Andy, 30
    */
  def map2(): Unit = {
    val rdd = sparkContext.textFile("data/practiceOperator/people.txt")
    val lineLengths = rdd.map(line => line.length)
    lineLengths.collect().foreach(println)
  }

  /**
    * Classic WordCount example (for reference).
    * Example input:
    *   my name is xiaopang
    *   i like good good study day day up
    */
  def wc(): Unit = {
    val counts = sparkContext.textFile("data/practiceOperator/wcfile")
      .flatMap(line => line.split(" ")) // [my, name, is, xiaopang, i, like, good, good, study, day, day, up]
      .map(word => (word, 1))           // [(my,1),(name,1),...,(day,1),(day,1)]
      .reduceByKey(_ + _)               // sums all values that share the same key
    // WARNING: collect() pulls the entire RDD onto the driver, which can
    // exhaust driver memory on large data — prefer take(n) when exploring.
    counts.collect().foreach(println)
    //counts.take(3).foreach(println)
  }

  /**
    * Step-by-step WordCount: materializes and prints each intermediate RDD so
    * the effect of every transformation can be inspected.
    */
  def wcStudy(): Unit = {
    val rdd = sparkContext.textFile("data/practiceOperator/wcfile")
    val flatMapRdd = rdd.flatMap(line => line.split(" "))
    val mapRdd = flatMapRdd.map(x => (x, 1))
    val reduceByKeyRdd = mapRdd.reduceByKey(_ + _)

    println("flatMapRdd-------flatMap(line => line.split(\" \"))-------------")
    flatMapRdd.collect().foreach(println)
    println("mapRdd-------map(x => (x, 1))-----------------")
    mapRdd.collect().foreach(println)
    println("reduceByKeyRdd----------mapRdd.reduceByKey((a, b) => (a + b))-------------")
    reduceByKeyRdd.collect().foreach(println)
  }


}
