package cn.whuc.spark.operator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo of RDD `groupBy`: counts Apache access-log requests per hour of day.
 *
 * Reads "input/apache.log", keys each line by its hour ("HH"), counts the
 * lines per hour, sorts by hour, and prints the (hour, count) pairs.
 */
object Demo_groupBy {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkContext (local mode, all available cores).
    val sc: SparkContext = new SparkContext(
      new SparkConf()
        .setMaster("local[*]")
        // Was a blank string — a real name makes the job identifiable
        // in the Spark UI / history server.
        .setAppName("Demo_groupBy")
    )

    try {
      // 2. Transformation pipeline.
      // Sample log line:
      // 83.149.9.216 - - 17/05/2015:10:05:03 +0000 GET /presentations/logstash-monitorama-2013/images/kibana-search.png
      val lines: RDD[String] = sc.textFile("input/apache.log")

      // Key each line by its hour-of-day: field 3 is "dd/MM/yyyy:HH:mm:ss",
      // so splitting on ':' and taking index 1 yields the hour (e.g. "10").
      val byHour: RDD[(String, Iterable[String])] = lines.groupBy { line =>
        line.split(" ")(3).split(":")(1)
      }

      // Count the lines in each hour bucket, then sort by hour so the
      // printed output is deterministic. `mapValues` keeps the existing
      // partitioning key instead of rebuilding the pair.
      // NOTE(review): for large inputs, map-to-(hour, 1) + reduceByKey
      // would avoid shuffling whole groups; groupBy is kept here since
      // demonstrating it is the point of this demo.
      val counts: RDD[(String, Int)] = byHour
        .mapValues(_.size)
        .sortBy(_._1)

      counts.collect().foreach(println)
    } finally {
      // 3. Always release the context, even if the job throws.
      sc.stop()
    }
  }
}
