package com.bigdata.hpsk.spark.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object SQLLogAnalyzerSpark {

  /**
    * Analyzes an Apache access log with Spark SQL (Spark 1.x `SQLContext` API).
    *
    * Pipeline: read the raw log file as text -> filter malformed lines ->
    * parse each line into an [[ApacheAccessLog]] -> convert the RDD to a
    * DataFrame -> register it as a cached temp table -> run four SQL
    * aggregation queries (content size stats, response-code counts,
    * frequent IPs, top endpoints) and print the results.
    */
  def main(args: Array[String]): Unit = {

    // Create SparkConf: application name + local mode with 2 threads.
    val sparkConf = new SparkConf()
      .setAppName("Log Analyzer Spark Application")
      .setMaster("local[2]")

    // Lower the default shuffle partition count (200) — this is a small,
    // local job and 200 tiny tasks per shuffle would just add overhead.
    sparkConf.set("spark.sql.shuffle.partitions", "20")

    // Create (or reuse an existing) SparkContext.
    val sc = SparkContext.getOrCreate(sparkConf)
    // Keep console output readable.
    sc.setLogLevel("WARN")

    /**
      * SQLContext is the entry point for Spark SQL programs (Spark 1.x).
      * Data read here is converted to a DataFrame for SQL analysis.
      */
    val sqlContext = SQLContext.getOrCreate(sc)  // equivalent to new SQLContext(sc)
    // Brings in the implicit RDD -> DataFrame conversion used by .toDF().
    import sqlContext.implicits._

    /**
      * Register UDFs right after creating the SQLContext so they are
      * available to every subsequent SQL query.
      */
    sqlContext.udf.register(
      "toLower", // function name as referenced from SQL
      // Guard against null column values: String.toLowerCase would NPE.
      (word: String) => if (word == null) null else word.toLowerCase
    )

    /** ================================================================= */
    /**
      * The input is a plain text file, so it must be read as an RDD and
      * converted to a DataFrame (RDD -> DataFrame, schema inferred by
      * reflection from the case class). A DataFrame is analogous to a
      * database table: named, typed columns over rows of data.
      */
    // Input file path (HDFS-style path; swap in a local path for testing).
    val logFile = "/datas/access_log"// "D:/access_log" // apache.access.log
    // Create an RDD of parsed log records.
    val accessLogsRDD: RDD[ApacheAccessLog] = sc
      // Read the file line by line.
      .textFile(logFile)
      // Drop lines that do not match the expected log format.
      .filter(log => ApacheAccessLog.isValidateLogLine(log))
      // Parse each remaining line (regex-based) into a case class.
      .map(log => ApacheAccessLog.parseLogLine(log))

    /**
      * Convert the RDD to a DataFrame. Of the two available approaches,
      * this one relies on RDD[CaseClass]: column names and types are
      * derived automatically via reflection.
      */
    val accessLogsDF: DataFrame = accessLogsRDD.toDF()  // implicit conversion
    // Resulting schema:
    /**
      root
       |-- ipAddress: string (nullable = true)
       |-- clientIdented: string (nullable = true)
       |-- userId: string (nullable = true)
       |-- dateTime: string (nullable = true)
       |-- method: string (nullable = true)
       |-- endpoint: string (nullable = true)
       |-- protocol: string (nullable = true)
       |-- responseCode: integer (nullable = false)
       |-- contentSize: long (nullable = false)
      */
    // accessLogsDF.printSchema()
    // Sample rows:
    // accessLogsDF.show(5)

    // accessLogsDF.select(accessLogsDF("responseCode"))
    // accessLogsDF.select($"responseCode")

    /**
      * Run the analysis with SQL.
      */
    // Register the DataFrame as a temporary table; the table name follows
    // Hive naming conventions.
    accessLogsDF.registerTempTable("tmp_access_log")  // behaves like a Hive table
    // The temp table is queried several times below, so cache it in memory
    // to avoid re-reading and re-parsing the file for every query.
    sqlContext.cacheTable("tmp_access_log")

    // Sanity check: row count of the table.
    sqlContext.sql("SELECT COUNT(*) AS cnt FROM tmp_access_log").show()

    /**
      * Requirement 1: Content Size
      *   The average, min, and max content size of responses returned from the server.
      */
    val contentSizeRow: Row = sqlContext.sql(
      """
        |SELECT
        | SUM(contentSize), COUNT(*), MIN(contentSize), MAX(contentSize)
        |FROM
        | tmp_access_log
      """.stripMargin).first()
    // Guard against an empty table: COUNT(*) is 0 and SUM/MIN/MAX are null
    // there, so the unguarded average would divide by zero / NPE on getLong.
    val totalRows = contentSizeRow.getLong(1)
    if (totalRows > 0) {
      println(s"Content Size Avg: ${contentSizeRow.getLong(0) / totalRows}, Min: ${contentSizeRow(2)}, Max: ${contentSizeRow(3)}")
    } else {
      println("Content Size: no valid log lines found")
    }

    /**
      * Requirement 2: Response Code
      *   A count of response codes returned.
      */
    val responseCodeToCount = sqlContext.sql(
      """
        |SELECT
        | responseCode, COUNT(*) AS cnt
        |FROM
        | tmp_access_log
        |GROUP BY
        | responseCode
      """.stripMargin).map(row => (row.getInt(0), row.getLong(1))).collect()
    println(s"Response Code Count : ${responseCodeToCount.mkString("[", ",", "]")}")

    /**
      * Requirement 3: IP Address
      *   All IP addresses that have accessed this server more than N times (N = 30).
      */
    val ipAddresses: Array[(String, Long)] = sqlContext.sql(
      """
        |SELECT
        | ipAddress, COUNT(*) AS cnt
        |FROM
        | tmp_access_log
        |GROUP BY
        | ipAddress
        |HAVING
        | cnt > 30
        |LIMIT 10
      """.stripMargin).map(row => (row.getString(0), row.getLong(1))).collect()
    println(s"IP Addresses: ${ipAddresses.mkString(",")}")

    /**
      * Requirement 4: Endpoint
      *   The top endpoints requested by count.
      */
    val topEndpoints: Array[(String, Long)] = sqlContext.sql(
      """
        |SELECT
        | endpoint, COUNT(*) AS cnt
        |FROM
        | tmp_access_log
        |GROUP BY
        | endpoint
        |ORDER BY
        | cnt DESC
        |LIMIT 5
      """.stripMargin).map(row => (row.getString(0), row.getLong(1))).collect()
    println(s"Top Endpoints : ${topEndpoints.mkString("[", ",", "]")}")

    // Release the cached table once all queries are done.
    sqlContext.uncacheTable("tmp_access_log")

    // Keep the driver alive so the Spark Web UI (port 4040) stays reachable
    // for inspection. NOTE(review): ~28 hours is excessive for that purpose —
    // consider a shorter sleep or prompting for a key press instead.
    Thread.sleep(100000000)

    // Stop the SparkContext and release all resources.
    sc.stop()
  }

}
