package cn.lagou.homework

import org.apache.spark.{SparkConf, SparkContext}

import java.util.regex.Pattern

/**
 * CDN access-log analysis job (local Spark):
 *   1. request count per client IP (top 10) and total distinct IPs;
 *   2. distinct-IP count per .mp4 video (top 10);
 *   3. total CDN traffic (GB) per hour of day.
 *
 * Reads "data/cdn.txt"; results are printed to stdout.
 */
object HomeWork2_log {
  // group(1) = client IP, group(2) = requested video file name (*.mp4)
  val ipPattern = Pattern.compile("""(\S+) .+/(\S+\.mp4) .*""")
  // group(1) = bracketed timestamp prefix, group(2) = HTTP status (200|206|304), group(3) = response bytes
  val flowPattern = Pattern.compile(""".+ \[(.+?) .+ (200|206|304) (\d+) .+""")

  def main(args: Array[String]): Unit = {
    // Initialize Spark in local mode using all available cores
    val conf = new SparkConf().setAppName("BaseStationDemo").setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // Read the raw CDN log, one request per line
    val logRDD = sc.textFile("data/cdn.txt")

    // 1. Request count per client IP (first whitespace-delimited token), descending
    val ipRDD = logRDD
      .map(line => (line.split("\\s+")(0), 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)

    ipRDD.take(10).foreach(println)
    println(s"独立IP数：${ipRDD.count()}")

    // 2. Distinct-IP count per video
    println("视频独立IP 数")
    logRDD
      .flatMap { line =>
        // Keep only lines that reference an .mp4, keyed by (video, ip)
        val m = ipPattern.matcher(line)
        if (m.matches()) Some(((m.group(2), m.group(1)), 1)) else None
      }
      .reduceByKey(_ + _)                        // deduplicate (video, ip) pairs
      .map { case ((video, _), _) => (video, 1) } // one record per distinct IP of each video
      .reduceByKey(_ + _)                        // FIX: aggregate distinct IPs per video
                                                 // (was missing — every count stayed 1,
                                                 //  so the top-10 sort was meaningless)
      .sortBy(_._2, ascending = false)
      .take(10)
      .foreach(println)

    // 3. Total traffic per hour of day
    println("每小时流量:")
    logRDD
      .map { line =>
        val m = flowPattern.matcher(line)
        // hour = second ':'-delimited field of the timestamp; bytes from group(3)
        if (m.matches()) (m.group(1).split(":")(1), m.group(3).toLong)
        else ("", 0L) // sentinel for non-matching lines, dropped below
      }
      .filter { case (_, flow) => flow != 0 }
      // Small result set: reduce into a single partition, then finish on the driver
      .reduceByKey(_ + _, 1)
      .collectAsMap()
      .toList
      // Explicit map instead of mapValues (deprecated lazy view in Scala 2.13);
      // integer division converts bytes -> whole GB
      .map { case (hour, bytes) => (hour, bytes / 1024 / 1024 / 1024) }
      .sortBy(_._1) // hours are zero-padded ("00".."23"), so lexicographic order is chronological
      .foreach { case (k, v) => println(s"${k}时 CDN流量${v}G") }

    sc.stop()
  }
}
