package cn.bigdata.sparkcore.job

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/** CDN access-log analysis job (local-mode Spark).
  *
  * Reads whitespace-separated log lines from data/cdn.txt where, per the field
  * indexing below: field 0 is the client IP, field 3 is the timestamp
  * ("[day/Mon/year:HH:MM:SS" — colon-split index 1 is the hour), field 6 is the
  * requested URL, and field 9 is the response size in bytes.
  *
  * Prints three reports: distinct client-IP count, independent-IP count per
  * .mp4 video, and total traffic per hour of day.
  */
object Case2 {
  def main(args: Array[String]): Unit = {
    // getName on an object ends in '$'; .init drops that trailing character.
    val conf = new SparkConf().setAppName(this.getClass.getName.init).setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("warn")

    // Parse each line into (ip, hour, videoName, bytes) once, reused by all reports.
    val cdnRDD: RDD[(String, String, String, Long)] = sc.textFile("data/cdn.txt").map(line => {
      val arr = line.split("\\s+")

      val videoUrl = arr(6).split("/")
      val dayHourStr = arr(3).split(":")
      // Strip any query string from the last URL path segment to get the video name.
      // NOTE(review): arr(9).toLong will throw on non-numeric size fields (e.g. "-");
      // assumed not to occur in this dataset — confirm against the log format.
      (arr(0), dayHourStr(1), videoUrl.last.split("\\?")(0), arr(9).toLong)
    })

    // NOTE(review): this is the number of *distinct* IPs, despite the message wording.
    val ipCount = cdnRDD.map(_._1).distinct().count()
    println(s"计算每个ip的数量: $ipCount")

    println("计算每个视频的独立IP数")
    // Independent-IP count per .mp4 video.
    // distinct() + reduceByKey replaces the original groupByKey().mapValues(_.toSet.size)
    // anti-pattern: groupByKey ships every (video, ip) occurrence across the shuffle and
    // materializes the full Iterable per key in one executor's memory, whereas
    // distinct + reduceByKey deduplicates map-side and aggregates incrementally.
    // The result is identical: one count of unique IPs per video, ascending by count.
    cdnRDD.filter(_._3.contains(".mp4"))
      .map { case (ip, _, video, _) => (video, ip) }
      .distinct()
      .map { case (video, _) => (video, 1) }
      .reduceByKey(_ + _)
      .sortBy(_._2)
      .collect()
      .foreach(println)

    println("计算一天每个小时的流量")
    // Sum bytes per hour-of-day. NOTE(review): integer division truncates to whole
    // GB (e.g. 1.9 GB prints as "1G"); kept as-is to preserve the existing output.
    cdnRDD.map { case (_, hour, _, traffic) => (hour, traffic) }
      .reduceByKey(_ + _)
      .mapValues(_ / 1024 / 1024 / 1024 + "G")
      .sortByKey()
      .collect()
      .foreach(println)

    sc.stop()
  }
}
