package com.bigdata.hpsk.spark.logs

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Batch analysis of an Apache access log with Spark core (RDD API).
  *
  * Requirements implemented:
  *   1. avg / min / max response content size
  *   2. count per HTTP response code
  *   3. IP addresses with more than N accesses
  *   4. top-5 endpoints by request count
  */
object LogAnalyzerSpark {

  // Requirement 3 threshold: only report IPs with more than this many accesses.
  private val MinAccessCount = 20

  def main(args: Array[String]): Unit = {

    // Create SparkConf for a local two-core run.
    val sparkConf = new SparkConf()
      .setAppName("Log Analyzer Spark Application")
      .setMaster("local[2]")

    // Create (or reuse) the SparkContext.
    val sc = SparkContext.getOrCreate(sparkConf)
    // Reduce log noise so the printed results are easy to spot.
    sc.setLogLevel("WARN")

/** ================================================================= */
    // Input file path (cluster path by default; the commented alternatives
    // are for local testing).
    val logFile = "/datas/access_log"// "D:/access_log" // apache.access.log

    // Build an RDD of parsed access-log records:
    // read line by line, drop malformed lines, then parse each line (regex)
    // into an ApacheAccessLog.
    val accessLogsRDD: RDD[ApacheAccessLog] = sc
      .textFile(logFile)
      .filter(log => ApacheAccessLog.isValidateLogLine(log))
      .map(log => ApacheAccessLog.parseLogLine(log))

    // Every requirement below re-reads this RDD, so cache it once.
    accessLogsRDD.cache()

    println(s"Count = ${accessLogsRDD.count()} \n ${accessLogsRDD.first()}")

    /**
      * Requirement 1: Content Size
      *   The average, min, and max content size of responses returned from the server.
      */
    val contentSizeRDD: RDD[Long] = accessLogsRDD.map(_.contentSize)
    // FIX: the original made four separate passes (reduce + count + min + max),
    // truncated the average with integer division, and threw
    // UnsupportedOperationException on an empty RDD. stats() computes
    // count/mean/min/max in a single pass, yields an exact (Double) mean, and
    // is safe on empty input — which also makes caching this RDD unnecessary.
    val sizeStats = contentSizeRDD.stats()
    if (sizeStats.count > 0) {
      println(s"Content Size Avg: ${sizeStats.mean}, " +
        s"Min: ${sizeStats.min.toLong}, Max: ${sizeStats.max.toLong}")
    } else {
      println("Content Size: no valid records found")
    }

    /**
      * Requirement 2: Response Code
      *   A count of response code's returned.
      */
    val responseCodeToCount = accessLogsRDD
      // classic word-count pattern over the response code
      .map(log => (log.responseCode, 1))
      .reduceByKey(_ + _)
      // collect() is safe here: the number of distinct HTTP codes is tiny
      .collect()
    println(s"Response Code Count : ${responseCodeToCount.mkString("[", ",", "]")}")

    /**
      * Requirement 3: IP Address
      *   All IP Addresses that have accessed this server more than N times
      */
    val ipAddresses: Array[(String, Int)] = accessLogsRDD
      .map(log => (log.ipAddress, 1))
      .reduceByKey(_ + _)
      .filter(_._2 > MinAccessCount) // keep only frequently-seen clients
      .take(20)
    println(s"IP Addresses: ${ipAddresses.mkString(",")}")

    /**
      * Requirement 4: Endpoint
      *   The top endpoints requested by count.
      */
    val topEndpoints: Array[(String, Int)] = accessLogsRDD
      .map(log => (log.endpoint, 1))
      .reduceByKey(_ + _)
      // top-k with a custom Ordering on the count avoids a full sortByKey:
      .top(5)(OrderingUtils.SecondValueOrdering)
    /** Equivalent (but more expensive) alternative:
      .map(tuple => (tuple._2, tuple._1))
      .sortByKey(ascending = false)
      .take(5).map(tuple => (tuple._2, tuple._1))
    */
    println(s"Top Endpoints : ${topEndpoints.mkString("[", ",", "]")}")

    // Release the cached log records.
    accessLogsRDD.unpersist()

    // Dev-only pause so the Spark Web UI (http://localhost:4040) can be
    // inspected before the context shuts down; remove for production runs.
    Thread.sleep(100000000)

    // Stop the SparkContext and release all resources.
    sc.stop()
  }

}
