package task2

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * CDN access-log analysis job (local Spark).
 *
 * Computes three reports from a CDN log file:
 *   1. number of distinct client IPs,
 *   2. distinct-IP count per requested .mp4 video (top 5),
 *   3. cache-HIT request count per hour (top 5).
 *
 * Usage: the first CLI argument, if given, overrides the input path;
 * otherwise the original hard-coded location is used.
 */
object LogAnalysis {

  /** One parsed access-log record: client IP, timestamp (inside [...]),
    * cache tag (e.g. HIT/MISS), and the requested page path. */
  case class Log(ip: String, time: String, tag: String, page: String)

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setAppName(s"${this.getClass.getCanonicalName}")
      .setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Input path: first CLI arg wins; fall back to the original hard-coded
    // location for backward compatibility.
    val path =
      if (args.nonEmpty) args(0)
      else "file:///D:\\projects\\spark_homework\\data\\cdn.txt"

    // Read raw log lines.
    val input = sc.textFile(path)

    // Parse each line. Expected shape (combined-log style):
    //   <ip> <tag> ... [<time>] "<METHOD> <page> <proto>" ...
    // Splitting on '"' isolates the request; the prefix holds ip/tag/time.
    // flatMap + Option skips malformed lines instead of crashing the job
    // (the original indexed fields(1) etc. unguarded).
    val data = input.flatMap { line =>
      val fields = line.split("\"")
      if (fields.length < 2) None
      else {
        val prefix = fields(0)
        // Split the prefix once and reuse (original re-split it three times).
        val prefixParts = prefix.split("\\s+")
        // NOTE: original pattern was "[\\[|\\]]" — inside a character class
        // '|' is a literal pipe, not alternation, so it also split on '|'.
        val timeParts = prefix.split("[\\[\\]]")
        val requestParts = fields(1).split("\\s+")
        if (prefixParts.length < 2 || timeParts.length < 2 || requestParts.length < 2) None
        else Some(Log(prefixParts(0), timeParts(1), prefixParts(1), requestParts(1)))
      }
    }.cache()

    // Total number of distinct client IPs.
    val ips = data.map(_.ip).distinct().count()

    // Distinct-IP count per .mp4 video, sorted descending.
    // aggregateByKey builds a per-key Set of IPs; `_ + _` adds one element
    // directly (original allocated a throwaway Set per record via ++ Set(x)).
    val pv = data.filter(log => log.page.matches(".+\\.mp4"))
      .map(log => (log.page, log.ip))
      .aggregateByKey(Set.empty[String])(_ + _, _ ++ _)
      .mapValues(_.size)
      .sortBy(_._2, ascending = false)

    // Cache-HIT traffic per hour. time looks like "dd/MMM/yyyy:HH:mm:ss +zzzz";
    // taking the first two ':'-separated parts of the date-time token yields a
    // "dd/MMM/yyyy:HH" bucket key.
    val visitPerHour = data
      .filter(_.tag.equalsIgnoreCase("HIT"))
      .map { log =>
        val parts = log.time.split("\\s+")(0).split(":")
        (s"${parts(0)}:${parts(1)}", 1)
      }
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)

    // Print the reports (user-facing strings kept as-is).
    println(s"独立ip个数为 $ips")
    println("===================================")
    println("每个视频独立ip数如下：")
    pv.take(5).foreach(println)
    println("===================================")
    println("一天中每小时流量如下：")
    visitPerHour.take(5).foreach(println)

    // Deliberate pause so the Spark web UI (localhost:4040) stays reachable
    // for inspection before the context shuts down.
    Thread.sleep(50000)

    sc.stop()
  }
}
