package gunglad.com.gitee_22_4_21_task03

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Test02 {
  /**
   * Local Spark word-count demo: builds an RDD from an in-memory collection,
   * splits it into words, counts occurrences per word, and prints the result.
   */
  def main(args: Array[String]): Unit = {

    /**
     * The SparkContext is the entry point of all task computation.
     * Internally it creates the DAGScheduler and the TaskScheduler.
     */
    // TODO:1 create the SparkContext (local mode, 2 threads)
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("统计单词"))

    // Guard the whole job so the SparkContext is always stopped,
    // even when a stage fails — otherwise the context (and its UI,
    // threads, temp dirs) would leak on error.
    try {
      /**
       * RDD creation: from a parallelized in-memory collection
       * via sc.parallelize(seq) — NOT from a file.
       */
      val wordList: Array[String] = Array("hello", "scala", "hello", "spark", "hello")

      // TODO:2 build the source RDD from the local collection
      val lines: RDD[String] = sc.parallelize(wordList)

      // 2.1 inspect the input (collect is safe here: tiny demo data)
      println(lines.collect().toBuffer)

      // TODO:3 split each element into words
      val words: RDD[String] = lines.flatMap(_.split(" "))

      // 3.1 pair each word with an initial count of 1
      val pairs: RDD[(String, Int)] = words.map((_, 1))

      // 3.2 sum the counts per word
      val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)

      // 3.3 bring the result back to the driver and print it
      val tuples: Array[(String, Int)] = counts.collect()
      println(tuples.toBuffer)
    } finally {
      // Release the SparkContext unconditionally.
      sc.stop()
    }
  }
}
