package com.dxf.bigdata.D05_spark_again.action

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates RDD lineage/dependency types while counting clicks per hour
 * from `apache.log`:
 *
 *  - `map` produces a OneToOneDependency (narrow: partition data unchanged)
 *  - `groupBy` produces a ShuffleDependency (wide: data is repartitioned)
 *
 * Each transformation's `dependencies` list is printed so the narrow/wide
 * distinction can be observed directly.
 */
object dependencies {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("app")
    sparkConf.set("spark.port.maxRetries","100")
    // apache.log: count the number of clicks per hour
    val context = new SparkContext(sparkConf)

    try {
      val dataRDD: RDD[String] = context.textFile("./datas/apache.log",2)
      // Source RDD read straight from file — no parent dependencies of interest.
      println(dataRDD.dependencies)

      println("****************************************************************************")

      val timeRdd: RDD[String] = dataRDD.map(
        line => {
          // Log line is space-separated; field 3 is the timestamp column.
          val words: Array[String] = line.split(" ")
          words(3)
        }
      )
      // timeRdd depends on dataRDD via OneToOneDependency: partition data is unchanged (narrow).
      println(timeRdd.dependencies)
      println("****************************************************************************")

      val hourRDD: RDD[(String, Iterable[(String, Int)])] = timeRdd.map(x => {
        // Extract the hour ("HH") from the timestamp, e.g. "17/05/2015:10:05:03" -> "10".
        val time: String = x.substring(11, 13)
        (time, 1)
      }).groupBy(_._1)

      // hourRDD depends on timeRdd via ShuffleDependency: groupBy redistributes data (wide).
      println(hourRDD.dependencies)
      println("****************************************************************************")

      val value: RDD[(String, Int)] = hourRDD.map{
        case (hour,iter) => (hour,iter.size)
      }
      // map after the shuffle is narrow again (OneToOneDependency).
      println(value.dependencies)
      println("****************************************************************************")

      // Trigger the job; nothing runs until an action is invoked.
      val tuples: Array[(String, Int)] = value.collect()
    } finally {
      // Always release the SparkContext so local resources (UI port, threads) are freed,
      // even if the job above throws (e.g. missing input file).
      context.stop()
    }

  }

}
