package com.jinghang.spark_base._010_RDD

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates core RDD operators.
  *
  * map(func) takes a function as its argument.
  * What is a function? y = x + 1, i.e. x => x + 1; e.g. ySet = set.map(x => x + 1).
  * map applies `func` to every element of the dataset and returns a new
  * distributed dataset of the results, e.g. word => (word, 1).
  */
object _030Operator {

  // Builder pattern: each SparkConf setter returns `this`, so calls chain.
  // NOTE(review): the context is created eagerly at object initialization;
  // any failure here surfaces as an ExceptionInInitializerError.
  val conf = new SparkConf().setAppName("Operator")
    .setMaster("local[2]")
    .set("spark.driver.memory", "1g")
    .set("spark.default.parallelism", "3")
  val sc = new SparkContext(conf)
  sc.setLogLevel("ERROR")

  /** Entry point: runs one demo (uncomment others to try them), then stops the context. */
  def main(args: Array[String]): Unit = {
    map4()
    //filter()
    //flatMap()
    //flatMap2()
    //lookup()
    //groupByKey()
    //reduce()
    //mapValue()
    //reduceByKey()
    //sortByKey()
    //union()
    //distinct()
    //join()
    //cartesian()
    //action()

    sc.stop()
  }

  /**
    * map: returns a new distributed dataset formed by passing each element
    * of the source through a function.
    */
  def map1(): Unit = {
    val data = Array(1, 2, 3, 4, 5)
    val numbers = sc.parallelize(data)
    // Add 1 to every element.
    val incremented = numbers.map(_ + 1)

    incremented.collect().foreach(println)
    // Demonstrative only: `partitions` shows how the RDD is split.
    val partitions = incremented.partitions

    // take(n) fetches only the first n elements to the driver.
    incremented.take(2).foreach(println)
  }

  /** map over strings: split each line and keep the first token. */
  def map2(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    // split(" ") returns an Array[String]; (0) selects its first element.
    // Assumes every line is non-empty, otherwise (0) would throw.
    val firstWords = lines.map(_.split(" ")(0))

    firstWords.take(10).foreach(println)
  }

  /** map over a text file: line => line length, saved as text output. */
  def map3(): Unit = {
    val lines = sc.textFile("data/practiceOperator/people.txt")
    val lineLengths = lines.map(_.length)
    //val totalLength = lineLengths.reduce((a, b) => a + b)
    // NOTE: saveAsTextFile fails if the output directory already exists.
    lineLengths.saveAsTextFile("data/out/map")
  }

  /** Mini word count restricted to the word "hello", written to disk. */
  def map4(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val counts = lines
      .flatMap(_.split(" "))      // lines => individual words
      .filter(_ == "hello")       // keep only "hello"
      .map(word => (word, 1))     // word => (word, 1)
      .reduceByKey(_ + _)         // sum the 1s per key
    // NOTE: fails if the output directory already exists.
    counts.saveAsTextFile("data/out/wc")
  }

  /** filter: returns a new RDD with the elements that satisfy the predicate. */
  def filter(): Unit = {
    val data = Array(1, 2, 3, 4, 5, 6, 7, 8, 9)
    val numbers = sc.parallelize(data)
    val filtered = numbers.filter(_ > 5)
    filtered.collect().foreach(println)
  }

  /**
    * flatMap: like map, but each input item can be mapped to 0 or more output
    * items (so the function returns a Seq rather than a single item).
    *
    * map:
    *   {apple, pear}.map(peel) = {peeled apple, peeled pear}
    *   where `peel` has type A => B
    * flatMap:
    *   {apple, pear}.flatMap(chop) = {apple piece 1, apple piece 2, pear piece 1, pear piece 2}
    *   where `chop` has type A => List[B]
    *
    *   Array("hello spark", "hello world", "hello world")
    *   ==> [hello, spark] [hello, world] [hello, world]   (per-line split)
    *   ==> [hello, spark, hello, world, hello, world]     (flattened)
    */
  def flatMap(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val words = lines.flatMap(_.split(" "))
    println("RDD1.flatMap(line => line.split(\" \"))")
    words.collect().foreach(println)
  }

  /** flatMap followed by map: the classic word => (word, 1) pairing. */
  def flatMap2(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val pairs = lines.flatMap(_.split(" ")).map(word => (word, 1))
    pairs.collect().foreach(println)
  }

  /**
    * lookup: returns all values associated with a given key
    * (an action available on pair RDDs).
    */
  def lookup(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val pairs = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
    println("lookup(\"hello\")")
    // "hello" occurs three times, so this prints 1 three times.
    pairs.lookup("hello").foreach(println)

    /*
        lookup("hello")
            1
            1
            1

     */
  }

  /** groupByKey: groups all values sharing a key into one Iterable. */
  def groupByKey(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val pairs = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
    val grouped = pairs.groupByKey()
    println("groupByKey()")
    grouped.collect().foreach(println)
  }

  /** reduce (action): folds all elements of the RDD into one value. */
  def reduce(): Unit = {
    val data = Array(1, 2, 5, 8)
    val numbers = sc.parallelize(data)
    val result = numbers.reduce(_ + _) // sums every element of the RDD
    println("reduce")
    println(result)
  }

  /**
    * mapValues: transforms only the value side of each pair.
    * groupByKey + mapValues(sum) is equivalent to reduceByKey(sum),
    * though reduceByKey is preferred (it combines map-side, shuffling less data).
    */
  def mapValue(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val pairs = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
    val grouped = pairs.groupByKey()
    val counts = grouped.mapValues(_.reduce(_ + _))
    println("mapValue()")
    counts.collect().foreach(println)
  }

  /** reduceByKey: the canonical word count. */
  def reduceByKey(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val pairs = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
    val counts = pairs.reduceByKey(_ + _)
    counts.collect().foreach(println)
    //counts.saveAsTextFile("output/reduceByKey")
  }

  /**
    * sortByKey: sorts a pair RDD by key.
    * Requirement: word counts in descending order — swap (word, count) to
    * (count, word) so the count becomes the key, then sortByKey(false).
    * Example output: (3,hello)(1,spark)(2,world)-style (count, word) pairs.
    */
  def sortByKey(): Unit = {
    val data = Array("hello spark", "hello world", "hello world")
    val lines = sc.parallelize(data)
    val pairs = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
    val counts = pairs.reduceByKey(_ + _)
    // `false` means descending order.
    val sorted = counts.map { case (word, count) => (count, word) }.sortByKey(false)
    sorted.collect().foreach(println)
    println("sortByKeyRDD")
    // Prints the RDD's toString (its id/lineage), not its contents.
    println(sorted)
    //sorted.saveAsTextFile("output/sortByKey")
  }
  //******************************** everything above is essential *************************************************

  /** union: all elements of both RDDs (duplicates kept). */
  def union(): Unit = {
    val left = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val right = sc.parallelize(Array(5, 6, 7, 8, 9))
    val result = left.union(right)
    result.collect().foreach(println)
  }

  /** intersection: elements present in both RDDs. */
  def intersection(): Unit = {
    val left = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val right = sc.parallelize(Array(5, 6, 7, 8, 9))
    val result = left.intersection(right)
    result.collect().foreach(println)
  }

  /** subtract: elements of the left RDD not present in the right. */
  def subtract(): Unit = {
    val left = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val right = sc.parallelize(Array(5, 6, 7, 8, 9))
    val result = left.subtract(right)
    result.collect().foreach(println)
  }

  /** cartesian: every (left, right) pair — size grows multiplicatively. */
  def cartesian(): Unit = {
    val left = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val right = sc.parallelize(Array(5, 6, 7, 8, 9))
    val result = left.cartesian(right)
    println("cartesian")
    result.collect().foreach(println)
  }

  /** distinct: removes duplicates (here, from the union of two RDDs). */
  def distinct(): Unit = {
    val left = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val right = sc.parallelize(Array(5, 6, 7, 8, 9))
    val result = left.union(right).distinct()
    result.collect().foreach(println)
  }

  /**
    * Outer joins on pair RDDs: fullOuterJoin keeps keys from either side
    * (missing sides become None); rightOuterJoin keeps every key of the
    * right RDD (missing left values become None).
    */
  def join(): Unit = {
    val left = sc.parallelize(Array(("A", "a1"), ("C", "c1"), ("D", "d1"), ("F", "f1"), ("F", "f2")))
    val right = sc.parallelize(Array(("A", "a2"), ("C", "c2"), ("C", "c3"), ("E", "e1")))
    val fullOuter = left.fullOuterJoin(right)
    val rightOuter = left.rightOuterJoin(right)
    println("fullOuterJoinRDD")
    fullOuter.collect().foreach(println)
    println("rightOuterJoinRDD")
    rightOuter.collect().foreach(println)
  }

  /*
      transformation: converts one RDD into another.
      action: triggers execution.
      RDD transformations are lazy: chaining rdd.flatMap.map does not run
      anything immediately; the transformations only execute when an action
      fires. This is a performance optimization — nothing is computed until
      it is needed, so unused transformation chains never waste resources.
   */
  def action(): Unit = {
    val data = Array(1, 2, 3, 4, 5, 6, 7, 8, 9)
    val numbers = sc.parallelize(data)
    numbers.collect()
    numbers.foreach(x => println(x))
    numbers.foreach(println)
    numbers.reduce(_ + _)
    numbers.take(2).foreach(println)
    // Fixed: the original passed "" (an invalid, empty output path).
    numbers.saveAsTextFile("data/out/action")
  }

}
