package cn.lagou.sparkcore

import java.util.regex.{Matcher, Pattern}

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object homework2 {
  /**
   * CDN access-log analysis (Spark Core exercise):
   *   1. distinct client IPs and their hit counts;
   *   2. distinct-IP count per requested .mp4 video;
   *   3. total traffic (GB) per hour of day.
   *
   * @param args optional: args(0) overrides the input file path
   *             (defaults to the original hard-coded location).
   */
  def main(args: Array[String]): Unit = {
    // Regex for video requests: group(1) = client IP, group(2) = requested .mp4 file.
    val ipPattern: Pattern = Pattern.compile("""(\S+) .+/(\S+\.mp4) .""")
    // Regex for traffic lines: group(1) = timestamp like "dd/MMM/yyyy:HH:mm:ss",
    // group(2) = HTTP status (200/206/304), group(3) = response size in bytes.
    val flowPattern: Pattern = Pattern.compile(""".+ \[(.+?) .+ (200|206|304) (\d+) .+""")

    // Local-mode SparkContext for the exercise.
    val conf: SparkConf = new SparkConf().setAppName(this.getClass.getCanonicalName).setMaster("local[*]")
    val sc = new SparkContext(conf)
    // Keep the console readable.
    sc.setLogLevel("WARN")

    // Input path can be supplied on the command line; falls back to the original default.
    val inputPath: String = args.headOption.getOrElse("file:///F:\\code\\lagoubigdata\\data\\cdn.txt")
    val dataRDD: RDD[String] = sc.textFile(inputPath)

    // 1. Distinct IPs with hit counts, sorted descending.
    //    (The previous duplicated, unused map(...) statement was removed.)
    val results: RDD[(String, Int)] = dataRDD.map(x => (x.split("\\s+")(0), 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false, 1)

    println("---------独立IP数----------")
    results.take(10).foreach(println)
    println(s"独立ip数： ${results.count()}")

    // 2. Per-video distinct-IP counts.
    //    flatMap + Option replaces the old sentinel tuple ((" ", " "), 0) whose
    //    "" comparisons in the downstream filter never matched anything.
    val videoRDD: RDD[(String, String)] = dataRDD.flatMap { line =>
      val m: Matcher = ipPattern.matcher(line)
      if (m.matches()) Some((m.group(1), m.group(2))) else None // (ip, video)
    }

    val result2: RDD[(String, Int)] = videoRDD
      .distinct() // one record per (ip, video) pair => counting pairs = distinct IPs per video
      .map { case (_, video) => (video, 1) }
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false, 1)

    println("---------------每个视频的独立IP数----------------")
    // collect() so the (small, already-reduced) result prints on the driver, in sorted order;
    // RDD.foreach(println) would print on the executors instead.
    result2.collect().foreach(println)

    // 3. Total traffic per hour of day.
    //    Non-matching lines are dropped here directly instead of emitting ("", 0L)
    //    sentinels; genuine zero-byte responses are no longer discarded.
    val flowRDD: RDD[(String, Long)] = dataRDD.flatMap { line =>
      val m: Matcher = flowPattern.matcher(line)
      // Timestamp looks like "18/Sep/2013:06:49:57" — field 1 after splitting on ':' is the hour.
      if (m.matches()) Some((m.group(1).split(":")(1), m.group(3).toLong)) else None
    }

    println("----------------------每小时流量--------------------")

    flowRDD
      // The result set is tiny (<= 24 keys): reduce into one partition, then
      // pull it to the driver and finish with local collection ops.
      .reduceByKey(_ + _, 1)
      .collectAsMap()
      // Convert bytes to GB (integer division, as before).
      .mapValues(_ / 1024 / 1024 / 1024)
      .toList
      // Sort by hour for readable output.
      .sortBy(_._1)
      .foreach { case (hour, gb) => println(s"${hour}时 CDN流量${gb}G") }

    sc.stop()
  }
}
