import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

import java.util.regex.{Pattern, Matcher}

/**
 * ClassName: Homework2 <br/>
 * Description: Spark job over a CDN access log ("data/cdn.txt") that reports
 * request counts per client IP, the top videos by distinct requesting IPs,
 * and CDN traffic per hour. <br/>
 * date: 2021/7/6 8:42<br/>
 *
 * @author Hesion
 * @since JDK 1.8
 */
object Homework2 {
  /**
   * Matches access-log lines requesting an mp4 file:
   * group(1) = client IP, group(2) = video file name.
   */
  val typePattern: Pattern = Pattern.compile("""(\S+) .+/(\S+\.mp4) .*""")

  /**
   * Matches requests that were served successfully (status 200/206/304):
   * group(1) = timestamp up to the first space inside "[...]", group(3) = bytes sent.
   */
  val flowPattern = Pattern.compile(""".+ \[(.+?) .+ (200|206|304) (\d+) .+""")

  def main(args: Array[String]): Unit = {
    // Boilerplate: local-mode Spark context named after this class.
    val conf = new SparkConf().setAppName(this.getClass.getCanonicalName).setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)

    val rdd: RDD[String] = sc.textFile("data/cdn.txt")

    // --- Task 1: request count per client IP (first whitespace-separated field). ---
    // cache(): ipRDD is consumed twice below (take + count); without it the whole
    // lineage (read + shuffle + sort) would be recomputed for the second action.
    val ipRDD = rdd
      .map(line => (line.split("\\s+")(0), 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .cache()
    ipRDD.take(10).foreach(println) // top 10 IPs by request count
    // NOTE(review): this counts distinct IPs over ALL log lines, not only video
    // requests — confirm that matches the intent of the label below.
    println(s"视频独立IP总数：${ipRDD.count()}")

    // --- Task 2: top 10 videos by number of distinct requesting IPs. ---
    // flatMap over Option replaces the original (("", ""), 0) sentinel tuples:
    // no post-hoc filter is needed and the key type stays (String, String)
    // instead of widening. (Executor-side debug printlns removed.)
    val ipVideoRDD = rdd.flatMap { line =>
      val m = typePattern.matcher(line)
      if (m.matches()) Some(((m.group(2), m.group(1)), 1)) else None
    }

    ipVideoRDD
      .reduceByKey(_ + _)                         // collapse duplicate (video, ip) pairs
      .map { case ((video, _), _) => (video, 1) } // one unit per distinct IP of a video
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(10)
      .foreach(println)

    // --- Task 3: CDN traffic per hour. ---
    // BUG FIX: the original else-branch returned (("", ""), 0L) while the match
    // branch returned (String, Long), silently widening the RDD element type to
    // a common supertype. flatMap keeps it a clean RDD[(String, Long)], and the
    // hour/size extraction is done once per line instead of twice.
    val flowRdd = rdd.flatMap { line =>
      val m = flowPattern.matcher(line)
      if (m.matches()) {
        // timestamp looks like "dd/Mon/yyyy:HH:mm:ss"; index 1 after ':' split is the hour
        Some((m.group(1).split(":")(1), m.group(3).toLong))
      } else None
    }

    flowRdd
      .reduceByKey(_ + _, 1) // single output partition: at most 24 hour keys
      .collectAsMap()
      .mapValues(_ / 1024 / 1024 / 1024) // integer division: whole GiB, truncated
      .toList
      .sortBy(_._2) // ascending by traffic
      .foreach { case (hour, gb) => println(s"${hour}时 CDN流量${gb}G") }

    sc.stop()
  }
}
