package sparkcore.day6.lesson01

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Administrator on 2018/5/2.
  */
object LogAnalyer {

  /** Field delimiter used by the log file (regex for `String.split`). */
  private val Delimiter = "\\#"

  /**
    * Analyzes a '#'-delimited web-server log and prints four reports:
    * content-size stats, response-code counts, frequent client IPs,
    * and the most-requested endpoints.
    *
    * @param args optional: args(0) overrides the default log-file path.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("LogAnalyer")
    val sc = new SparkContext(conf)

    // Backward-compatible generalization: allow the input path on the
    // command line, falling back to the original hard-coded location.
    val logPath = if (args.nonEmpty) args(0) else "D:\\1711班\\第十二天\\资料\\log.txt"

    // Parse each line into its fields once and cache: four independent
    // Spark jobs below would otherwise re-read and re-split the file.
    val fieldsRDD = sc.textFile(logPath).map(_.split(Delimiter)).cache()

    /**
      * Requirement 1:
      * The average, min, and max content size of responses returned from the server.
      * (Content size is field index 6.)
      */
    val contentSizeRDD = fieldsRDD.map(fields => fields(6).toInt).cache()

    val max = contentSizeRDD.max()
    val min = contentSizeRDD.min()
    val avg = contentSizeRDD.mean()

    println(s"contentSize: max: $max min:$min  avg:$avg")

    /**
      * Requirement 2:
      * A count of each response code returned. (Response code is field index 5.)
      */
    // collect() the small aggregated result so printing happens on the
    // driver: RDD.foreach(println) runs on executors and its output is
    // lost outside local mode.
    fieldsRDD.map(fields => (fields(5), 1))
      .reduceByKey(_ + _)
      .collect()
      .foreach { case (code, count) =>
        println("code:" + code + "  count:" + count)
      }

    /**
      * Requirement 3:
      * IP addresses that accessed this server more than N times
      * (threshold 2 here to suit the small sample log; IP is field index 0).
      */
    fieldsRDD.map(fields => (fields(0), 1))
      .reduceByKey(_ + _)
      .filter { case (_, count) => count > 2 }
      .collect()
      .foreach { case (ip, count) =>
        println("ip:" + ip + " count:" + count)
      }

    /**
      * Requirement 4:
      * The top endpoints requested, by count (top 2 here; the endpoint is
      * the second token of the request field at index 4).
      */
    val topN = fieldsRDD.map(fields => (fields(4).split(" ")(1), 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .take(2)
    for ((endpoint, count) <- topN) {
      println("endPoint:" + endpoint + " count:" + count)
    }

    sc.stop()

  }

}
