package study.core.rdd.dependency

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

/**
 * Demonstrates RDD dependency relationships in Spark: narrow (one-to-one)
 * dependencies for map/flatMap versus the shuffle dependency introduced by
 * reduceByKey, printed via `dependencies` and `toDebugString`.
 *
 * @author zh
 * @date 2021/5/17 10:52
 */
object TestDependency {
  /**
   * Builds the classic word-count pipeline and, after each transformation,
   * prints the RDD's contents and its `dependencies` so the narrow vs.
   * shuffle dependency chain is visible on the console.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // "local" runs Spark in-process; suitable for this single-JVM demo.
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    // Establish the Spark connection.
    val sc = new SparkContext(sparkConf)
    try {
      // Resolve the sample input from the classpath; fail fast with a clear
      // message instead of an opaque NPE when the resource is absent.
      val resource = ClassLoader.getSystemResource("wordcount/wordcount.txt")
      require(resource != null, "resource wordcount/wordcount.txt not found on classpath")

      // Read the file: one RDD element per line.
      val lines: RDD[String] = sc.textFile(resource.getPath)
      println("======lines=======")
      lines.collect().foreach(println)
      println(lines.dependencies)
      println("---------------")

      // Split every line into individual words (narrow dependency).
      val words: RDD[String] = lines.flatMap(_.split(" "))
      println("======words=======")
      words.collect().foreach(println)
      println(words.dependencies)
      println("---------------")

      // Pair each word with an initial count of 1 (narrow dependency).
      val wordToOne: RDD[(String, Int)] = words.map((_, 1))
      println("======wordToOne=======")
      wordToOne.collect().foreach(println)
      println(wordToOne.dependencies)
      println("---------------")

      // reduceByKey aggregates values per key and introduces a shuffle dependency.
      val wordCount: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)
      // FIX: the storage level must be set BEFORE the first action; the
      // original called persist() after collect(), so nothing was ever
      // cached and the lineage was recomputed from scratch.
      // wordCount.cache() would keep it in memory instead.
      wordCount.persist(StorageLevel.DISK_ONLY)
      println("======wordCount=======")
      wordCount.collect().foreach(println)
      println(wordCount.dependencies)
      println("---------------")
      // toDebugString shows the full lineage, including the CachedPartitions
      // marker now that persist() preceded the action above.
      println(wordCount.toDebugString)
    } finally {
      // Always release the Spark connection, even if a job above fails.
      sc.stop()
    }
  }
}
