package chapter04

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author 余辉 (Yu Hui)
 * @see https://blog.csdn.net/silentwolfyh
 * @note All operators covered in the book are executed in the shell / Spark SQL;
 *       their usage is the same as the examples below.
 * @since 2024-09-02 10:56 AM
 */
object RddOerator {

  /**
   * Demonstrates basic RDD transformations (`map` and `flatMap`) on a local
   * Spark context and prints the results of each transformation.
   *
   * Fix vs. original: the SparkContext is now stopped in a `finally` block so
   * the local Spark runtime is released cleanly even if a job fails (it was
   * previously never stopped), and the application name now matches this
   * object's purpose (it was a "WordCount" copy-paste leftover).
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {

    // Run locally using all available cores; the app name appears in the Spark UI.
    val conf: SparkConf = new SparkConf().setAppName("RddOperator").setMaster("local[*]")
    // Create the SparkContext; all RDDs below are built from it.
    val sc: SparkContext = new SparkContext(conf)

    try {
      // map: double every element of a 10-element RDD split across 2 partitions.
      // NOTE: foreach runs on executors, so print order across partitions is not deterministic.
      val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 2)
      val rdd2: RDD[Int] = rdd1.map(_ * 2)
      rdd2.foreach(println)

      // flatMap: split each line into individual words (one output element per word).
      val arr = Array(
        "spark hive flink",
        "hive hive flink",
        "hive spark flink",
        "hive spark flink"
      )
      val rdd3: RDD[String] = sc.makeRDD(arr, 2)
      val rdd4: RDD[String] = rdd3.flatMap(_.split(" "))
      rdd4.foreach(println)
    } finally {
      // Always shut down the Spark runtime, even if a job above throws.
      sc.stop()
    }

  }

}