package core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Topics covered:
 *  common RDD operators (map, flatMap, mapValues, union, cartesian)
 */
object core02_operator {

  /**
   * Demo entry point: builds a local SparkContext and walks through a few
   * common RDD operators, printing the result of each to stdout.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("test")

    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    try {
      // Sample pair RDD shared by the map and mapValues demos below.
      val dataRdd: RDD[(String, Int)] = sc.parallelize(List(
        ("zhangsan", 234),
        ("zhangsan", 5667),
        ("zhangsan", 343),
        ("lisi", 212),
        ("lisi", 44),
        ("lisi", 33),
        ("wangwu", 535),
        ("wangwu", 22)
      ))

      /**
       * map: one-to-one transformation of each element.
       * Returns: MapPartitionsRDD
       */
      println("=====map======")
      // Identity mapping, kept purely to illustrate the operator.
      dataRdd.map(k => (k._1, k._2)).foreach(println)

      /**
       * flatMap: one-to-many transformation; the results are flattened
       * into a single RDD.
       * Returns: MapPartitionsRDD
       */
      println("=====flatMap======")
      sc.parallelize(List("hello java spark"))
        .flatMap(_.split(" "))
        .foreach(println)

      /**
       * mapValues: transforms only the value of each key/value pair,
       * leaving the key (and any partitioning) untouched.
       */
      println("=====mapValues======")
      val mapPartitionRdd: RDD[(String, (String, Int))] = dataRdd.map(k => (k._1, k))
      // NOTE: printing inside a transformation is for demo purposes only;
      // count() is the action that forces the lazy mapValues to execute.
      mapPartitionRdd.mapValues(println(_)).count()

      /**
       * union: concatenates two RDDs; duplicates are kept.
       */
      println("=====union======")
      val d1: RDD[(Int, String)] = sc.parallelize(List((1, "zs"), (2, "lisi")))
      val d2: RDD[(Int, String)] = sc.parallelize(List((3, "zs"), (4, "lisi")))

      val unionRdd: RDD[(Int, String)] = d1.union(d2)

      unionRdd.foreach(println)

      /**
       * cartesian: cross product of two RDDs — every element of the first
       * paired with every element of the second. Output size is |d1| * |d2|,
       * so use with care on large inputs.
       */
      println("=====cartesian======")

      val cartesianRdd: RDD[((Int, String), (Int, String))] = d1.cartesian(d2)

      cartesianRdd.foreach(println)
    } finally {
      // Always release the SparkContext, even if one of the demos throws.
      sc.stop()
    }
  }

}
