package cn.lagou.sparkcore.work

import java.util.regex.{Matcher, Pattern}

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Work2 {
  def main(args: Array[String]): Unit = {
    // Local-mode Spark context; app name is the object's canonical name minus
    // the trailing '$' that Scala appends to singleton objects.
    val conf = new SparkConf().setAppName(this.getClass.getCanonicalName.init).setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // CDN access log, one request per line.
    val cdnLines: RDD[String] = sc.textFile("./src/main/data/cdn.txt")

    // 2.1 Unique IPs: the first whitespace-separated field of each line is the
    // client IP. Count hits per IP and print in descending order of hit count.
    val ips: RDD[(String, Int)] = cdnLines.map(line => (line.split("\\s+")(0), 1))
    ips.reduceByKey(_ + _).sortBy(_._2, ascending = false).collect().foreach(println)

    // 2.2 Unique IPs per video: group(1) = client IP, group(2) = .mp4 file name.
    // Non-matching lines are dropped at the source with flatMap/Option instead of
    // emitting a (("",""),0) sentinel that must be filtered out afterwards.
    val ipPattern = Pattern.compile("""(\S+) .+/(\S+\.mp4) .*""")
    val videoIpsRDD: RDD[((String, String), Int)] = cdnLines.flatMap { line =>
      val m: Matcher = ipPattern.matcher(line)
      if (m.matches()) Some(((m.group(2), m.group(1)), 1)) else None
    }
    val count: Long = videoIpsRDD
      // Collapse duplicate (video, ip) pairs — one record per distinct visitor IP.
      .reduceByKey(_ + _)
      // Re-key by video so the next reduce counts distinct IPs per video.
      .map { case ((video, _), _) => (video, 1) }
      .reduceByKey(_ + _)
      // count() yields the number of distinct videos; the original sorted by
      // value right before count(), a useless extra shuffle that was removed.
      .count()
    println(count)

    // 2.3 Traffic per hour of day: group(1) is the timestamp (hour is the second
    // ':'-separated field), group(3) the response size for statuses 200/206/304.
    val flowPattern = Pattern.compile(""".+ \[(.+?) .+ (200|206|304) (\d+) .+""")
    val flowRDD: RDD[(String, Long)] = cdnLines.flatMap { line =>
      val m = flowPattern.matcher(line)
      if (m.matches()) Some((m.group(1).split(":")(1), m.group(3).toLong)) else None
    }

    println("每小时流量:")
    flowRDD
      // Keep parity with the original logic: matched zero-byte responses are dropped.
      .filter { case (_, flow) => flow != 0 }
      // Result is tiny (≤ 24 keys): reduce into a single partition, then finish
      // the transformation on the driver as a plain Scala collection.
      .reduceByKey(_ + _, 1)
      .collectAsMap()
      // Convert bytes to GB (integer division, truncating).
      .mapValues(_ / 1024 / 1024 / 1024)
      .toList
      // Hour keys are zero-padded strings, so lexicographic sort == numeric sort.
      .sortBy(_._1)
      .foreach(println)

    // Release Spark resources.
    sc.stop()
  }
}
