
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import java.sql.{Connection, DriverManager, PreparedStatement}

/**
 * One parsed Apache access-log record.
 *
 * Field semantics follow the combined-log line format parsed in
 * [[SparkApacheLogApp]], e.g.:
 * {{{27.19.74.143 - - [30/May/2013:17:38:20 +0800] "GET /static/image/common/faq.gif HTTP/1.1" 200 1127}}}
 *
 * @param ip             client IP address
 * @param mid            client identity field (typically "-")
 * @param user_id        authenticated user id field (typically "-")
 * @param access_time    access timestamp, e.g. "30/May/2013:17:38:20" (leading "[" stripped)
 * @param request_method HTTP method, e.g. "GET" (leading quote stripped)
 * @param request_url    requested resource path
 * @param http_info      protocol and version, e.g. "HTTP/1.1" (trailing quote stripped)
 * @param status         HTTP response status code
 * @param size           response body size in bytes; 0 when the log records "-"
 */
final case class ApacheLogBean(
                                ip: String,
                                mid: String,
                                user_id: String,
                                access_time: String,
                                request_method: String,
                                request_url: String,
                                http_info: String,
                                status: Int,
                                size: Long
                              )

object SparkApacheLogApp {

  def main(args: Array[String]): Unit = {
    // Spark session entry point (local 2-core run, 2 shuffle partitions for a small dataset).
    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("SparkApacheLogApp")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()
    import spark.implicits._

    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2) Read the raw log lines, ETL them into a typed DataFrame, register a temp view,
    //    then show the row count and a 10-row sample.
    val logRdd: RDD[String] = sc.textFile(
      "hdfs://node101:8020/weblog/access_2013_05_30.log", minPartitions = 2
    )

    val logDF: DataFrame = logRdd
      // Split each line exactly once; drop null lines and lines that do not
      // tokenize into exactly 10 whitespace-separated fields (dirty data).
      .flatMap(log => Option(log).map(_.split("\\s")).filter(_.length == 10))
      // Map the token array into the ApacheLogBean case class.
      .map { split =>
        // Sample line: 27.19.74.143 - - [30/May/2013:17:38:20 +0800] "GET /static/image/common/faq.gif HTTP/1.1" 200 1127
        ApacheLogBean(
          split(0),
          split(1),
          split(2),
          split(3).replace("[", ""),
          split(5).replace("\"", ""),
          split(6),
          split(7).replace("\"", ""),
          split(8).toInt,
          // Response size is logged as "-" when no body was returned; treat that as 0 bytes.
          if ("-".equals(split(9))) 0L else split(9).toLong
        )
      }
      // Convert the RDD of case-class rows into a DataFrame.
      .toDF()
    logDF.printSchema()
    println(s"日志数据量：${logDF.count()}")
    logDF.show(10, truncate = false)

    // Register a temporary view for the SQL-based metrics below.
    logDF.createOrReplaceTempView("tmp_view_log")

    // 3) Metric 1: daily page views (PV). The date is the prefix of access_time
    //    before the first ':' (e.g. "30/May/2013").
    val df3 = spark.sql(
      """
        |SELECT
        |  split(access_time, ':')[0] AS date_str
        |  , sum(if(request_url IS NOT NULL, 1, 0)) AS pv
        |FROM tmp_view_log
        |GROUP BY split(access_time, ':')[0]
        |""".stripMargin
    )
    df3.show(50, truncate = false)

    // 4) Metric 2: daily registered-user count — requests whose URL contains 'register'.
    val df4: DataFrame = spark.sql(
      """
        |SELECT
        |  split(access_time, ':')[0] AS date_str
        |  , sum(if(instr(request_url, 'register') == 0, 0, 1)) AS uv
        |FROM tmp_view_log
        |GROUP BY split(access_time, ':')[0]
        |""".stripMargin
    )
    df4.show(50, truncate = false)

    // 5) Metric 3: daily distinct-IP count (inner query deduplicates (date, ip) pairs).
    val df5: DataFrame = spark.sql(
      """
        |SELECT
        |    date_str, count(ip) AS unique_ip_count
        |FROM (
        |     SELECT
        |         split(access_time, ':')[0] AS date_str
        |          , ip
        |     FROM tmp_view_log
        |     GROUP BY split(access_time, ':')[0], ip
        |)
        |GROUP BY date_str
        |""".stripMargin
    )
    df5.show(50, truncate = false)

    // 6) Metric 4: daily bounce users and bounce rate.
    //    Simplification: the IP address stands in for a session id — first count
    //    PV per IP, then an IP with exactly one PV counts as a bounce.
    val df6: DataFrame = spark.sql(
      """
        |WITH
        |  tmp1 AS (
        |    SELECT
        |      split(access_time, ':')[0] AS date_str
        |      , ip
        |      , count(1) AS ip_pv
        |    FROM tmp_view_log
        |    GROUP BY split(access_time, ':')[0], ip
        |)
        |SELECT
        |  date_str
        |  --, count(ip) AS user_count
        |  , sum(if(ip_pv = 1, 1, 0)) AS jump_user_count
        |  , ROUND(sum(if(ip_pv = 1, 1, 0)) / count(ip), 4) AS jump_rate
        |FROM tmp1
        |GROUP BY date_str
        |""".stripMargin
    )
    df6.show(100, truncate = false)

    // 7) Metric 5: join the four per-day result sets into one report row per day.
    df3.createOrReplaceTempView("result_pv")
    df4.createOrReplaceTempView("result_uv")
    df5.createOrReplaceTempView("result_ip")
    df6.createOrReplaceTempView("result_jump")

    val df7: DataFrame = spark.sql(
      """
        |SELECT
        |  t1.date_str
        |  , t1.pv
        |  , t2.uv
        |  , t3.unique_ip_count
        |  , t4.jump_user_count
        |  , t4.jump_rate
        |FROM result_pv t1
        |  JOIN result_uv t2 ON t1.date_str = t2.date_str
        |  JOIN result_ip t3 ON t1.date_str = t3.date_str
        |  JOIN result_jump t4 ON t1.date_str = t4.date_str
        |""".stripMargin
    )
    df7.show(100, truncate = false)

    // 8) Persist the merged report to MySQL.
    //    8a) Built-in JDBC writer: Overwrite drops and recreates db_test.weblog_report.
    df7.write
      .mode(SaveMode.Overwrite)
      .format("jdbc")
      .option("url", "jdbc:mysql://node101:3306/")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", "db_test.weblog_report")
      .option("user", "root")
      .option("password", "123456")
      .save()

    //    8b) Manual upsert into zg6_rikao.log_report via INSERT ... ON DUPLICATE KEY UPDATE.
    //        One connection and one batched statement per partition; try/finally
    //        guarantees both are closed even if a row fails mid-batch.
    df7.rdd.foreachPartition { iter =>
      Class.forName("com.mysql.jdbc.Driver")
      val connection: Connection = DriverManager.getConnection(
        "jdbc:mysql://node101:3306/zg6_rikao", "root", "123456"
      )
      try {
        val pstmt: PreparedStatement = connection.prepareStatement(
          """
            |INSERT INTO zg6_rikao.log_report(
            |    date_str, pv, uv, unique_ip_count, jump_user_count, jump_rate
            |) VALUES (
            |    ?, ?, ?, ?, ?, ?
            |)
            |ON DUPLICATE KEY UPDATE
            |    pv = VALUES(pv),
            |    uv = VALUES(uv),
            |    unique_ip_count = VALUES(unique_ip_count),
            |    jump_user_count = VALUES(jump_user_count),
            |    jump_rate = VALUES(jump_rate)
            |""".stripMargin
        )
        try {
          // Bind the six report columns positionally and accumulate into one batch.
          iter.foreach { row =>
            pstmt.setObject(1, row.get(0))
            pstmt.setObject(2, row.get(1))
            pstmt.setObject(3, row.get(2))
            pstmt.setObject(4, row.get(3))
            pstmt.setObject(5, row.get(4))
            pstmt.setObject(6, row.get(5))
            pstmt.addBatch()
          }
          // Flush the whole partition in a single round trip.
          pstmt.executeBatch()
        } finally {
          pstmt.close()
        }
      } finally {
        connection.close()
      }
    }

    spark.stop()
  }

}
