package com.yujiahao.bigdata.rdd.dep

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates RDD lineage and dependency tracking in Spark.
 *
 * Runs a classic word-count pipeline and, after each transformation,
 * prints the RDD's lineage (`toDebugString`) and its direct
 * dependencies (`dependencies`), so the narrow/shuffle boundary
 * introduced by `reduceByKey` is visible in the output.
 */
object Spark01_Dep {

  def main(args: Array[String]): Unit = {
    // TODO 1. Connect to the Spark environment.
    // setAppName("name of this program"), setMaster("execution environment");
    // a distributed environment follows a master-worker architecture.
    val sparkConf: SparkConf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sparkContext: SparkContext = new SparkContext(sparkConf)

    // TODO 3. Business logic.
    val lines: RDD[String] = sparkContext.makeRDD(List("Hello", "hive", "hbase", "Hadoop"))
    // Print lineage and dependencies of the source RDD.
    println("打印出血缘关系:" + lines.toDebugString)
    println("打印出依赖关系:" + lines.dependencies)
    println("----------------------------------------------------------------")

    val words = lines.flatMap(_.split(" "))
    // Print lineage and dependencies after flatMap (narrow dependency).
    println("打印出血缘关系:" + words.toDebugString)
    println("打印出依赖关系:" + words.dependencies)
    println("-----------------------------------------------------------------")

    val wordToOne = words.map((_, 1))
    // Print lineage and dependencies after map (narrow dependency).
    println("打印出血缘关系:" + wordToOne.toDebugString)
    println("打印出依赖关系:" + wordToOne.dependencies)
    println("-----------------------------------------------------------------")

    val wordCount = wordToOne.reduceByKey(_ + _)
    // Print lineage and dependencies after reduceByKey (shuffle dependency).
    println("打印出血缘关系:" + wordCount.toDebugString)
    println("打印出依赖关系:" + wordCount.dependencies)
    println("------------------------------------------------------------------")

    // Trigger the job and print the counted results.
    wordCount.collect().foreach(println)

    // TODO 2. Release the Spark connection.
    sparkContext.stop()
  }

}
