package homework2

import homework1.HttpToAddress.exchangeIp
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

object IpCount {

  /**
   * CDN access-log analysis job.
   *
   * Reads `data/cdn.txt`, parses each line into (ip, (timestamp, url)), then:
   *   2.1 counts the number of accesses per independent IP;
   *   2.2 counts the number of *distinct* IPs per video (URLs ending in ".mp4");
   *   2.3 (hourly traffic) is left unimplemented.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("IpCount")
    val sc = new SparkContext(conf)

    // Parse each log line into (ip, (timestamp, url)).
    // NOTE(review): assumes a space-delimited log with the IP in field 0,
    // the timestamp in field 2 and the requested URL in field 6 — confirm
    // against the actual layout of data/cdn.txt.
    val firstRDD: RDD[(String, (String, String))] = sc.textFile("data/cdn.txt")
      .map(_.split(" "))
      .filter(_.length > 6) // guard: a malformed/short line would otherwise throw ArrayIndexOutOfBoundsException
      .map(fields => (fields(0), (fields(2), fields(6))))

    // Cache: this RDD is consumed by multiple actions below.
    firstRDD.cache()

    // 2.1 Number of accesses per independent IP.
    val ipCount: collection.Map[String, Long] = firstRDD.mapValues(_ => 1).countByKey()
    println(ipCount.toBuffer)

    // 2.2 Distinct IP count per video (a video is any URL ending in ".mp4").
    // FIX: the original keyed by the (ip, url) pair, which counts *accesses*
    // per (ip, video), not distinct IPs per video. To count distinct IPs we
    // key by the video URL, de-duplicate the (url, ip) pairs, then count.
    val mp4Count: collection.Map[String, Long] = firstRDD
      .collect { case (ip, (_, url)) if url.endsWith(".mp4") => (url, ip) }
      .distinct()
      .countByKey()
    println(mp4Count.toBuffer)

    // 2.3 Hourly traffic for one day — not implemented: the byte-count field
    // of the log format is unclear from the data seen so far.

    sc.stop()
  }
}
