package com.at.bigdata.spark.core.rdd.dep

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
 *
 * @author cdhuangchao3
 * @date 2023/3/6 8:40 PM
 */
/**
 * Demonstrates RDD lineage (dependency chain) via `toDebugString`.
 *
 * An RDD does not hold data; to support fault tolerance it records its
 * lineage — the chain of transformations back to the data source — so any
 * lost partition can be recomputed from scratch. This demo prints the
 * lineage after each transformation of a classic word count.
 */
object Spark01_RDD_toDebugString {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)

    // Ensure the context is always stopped, even if a stage fails;
    // otherwise the local Spark context (and its resources) would leak.
    try {
      // An RDD does not store data.
      // To improve fault tolerance, Spark records the relationship between RDDs.
      // On failure, data can be re-read from the source and recomputed along the lineage.
      val lines: RDD[String] = sc.textFile("datas/2.txt", 2)
      println(lines.toDebugString)
      println("**************")

      val words = lines.flatMap(_.split(" "))
      println(words.toDebugString)
      println("**************")

      val wordOnes = words.map(word => (word, 1))
      println(wordOnes.toDebugString)
      println("**************")

      // groupBy introduces a shuffle dependency — note the extra stage in the
      // lineage printed below. (reduceByKey would be more efficient, but this
      // demo deliberately shows the groupBy lineage.)
      val wordGroups = wordOnes.groupBy(_._1)
      println(wordGroups.toDebugString)
      println("**************")

      // Each group is non-empty by construction of groupBy, so reduce is safe.
      val wordCounts = wordGroups.map {
        case (_, pairs) =>
          pairs.reduce((left, right) => (left._1, left._2 + right._2))
      }
      println(wordCounts.toDebugString)
      println("**************")

      wordCounts.collect().foreach(println)
    } finally {
      // Release the SparkContext and its backing resources.
      sc.stop()
    }
  }
}
