package spark

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}


/**
 * One parsed record of the raw web access log (one line → one row).
 *
 * Field meanings follow the 10-token whitespace split performed in
 * [[SparkLogAPP3.main]]; tokens 4 (split index) is discarded there.
 *
 * @param ip             client IP address (token 0)
 * @param mid            second token of the raw line — presumably a machine/visitor id; TODO confirm
 * @param userid         third token — presumably the remote user id ("-" when absent); TODO confirm
 * @param access_time    access timestamp with the leading "[" stripped
 * @param request_method HTTP method with surrounding quotes stripped
 * @param request_url    requested URL path
 * @param http_info      HTTP protocol/version with surrounding quotes stripped
 * @param status         HTTP response status code
 * @param size           response size in bytes; 0 when the raw field is "-"
 */
case class SparkLog(
  ip: String,
  mid: String,
  userid: String,
  access_time: String,
  request_method: String,
  request_url: String,
  http_info: String,
  status: Int,
  size: Long
)

object SparkLogAPP3 {

  /**
   * Entry point: reads the raw web access log from HDFS, ETLs it into
   * [[SparkLog]] rows, computes daily PV / register-UV / unique-IP /
   * bounce-rate metrics with Spark SQL, merges them into one report,
   * and persists the report to a MySQL table via JDBC.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("SparkLogAPP3")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    import spark.implicits._

    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // Ensure the session is always stopped, even if an action fails mid-pipeline.
    try {
      // 2) ETL: read all log lines, parse them, and register a temp view.
      val logRdd: RDD[String] = sc.textFile(
        "hdfs://node101:8020//weblog/access_2013_05_30.log", minPartitions = 2
      )

      // Split each line exactly once (the original split in both filter and map).
      // Keep only well-formed 10-token records; tolerate "-" in the numeric
      // status/size fields instead of letting toInt/toLong abort the job.
      val logDF: DataFrame = logRdd
        .flatMap { line =>
          Option(line)
            .map(_.split("\\s")) // NOTE(review): "\\s" (not "\\s+") — repeated spaces yield empty tokens, as in the original
            .filter(_.length == 10)
            .map { f =>
              SparkLog(
                f(0),                                          // ip
                f(1),                                          // mid
                f(2),                                          // userid
                f(3).replace("[", ""),                         // access_time, strip leading '['
                f(5).replace("\"", ""),                        // request_method, strip quotes
                f(6),                                          // request_url
                f(7).replace("\"", ""),                        // http_info, strip quotes
                if ("-".equals(f(8))) 0 else f(8).toInt,       // status: "-" → 0, matching size handling
                if ("-".equals(f(9))) 0L else f(9).toLong      // size: "-" → 0
              )
            }
        }
        .toDF()
      logDF.printSchema()

      // Register the parsed log as a temp view for the SQL queries below.
      logDF.createOrReplaceTempView("tmp_view_log2")
      // spark.sql("select * from tmp_view_log2 limit 10").show(false)

      // 3) Metric 1: daily PV (page views).
      val df3: DataFrame = spark.sql(
        """
          |select
          | split(access_time,':')[0] as date_str
          | ,sum(if(request_url is not null,1,0)) as pv
          | from tmp_view_log2
          | group by split(access_time,':')[0]
          |""".stripMargin)
      df3.show(50, truncate = false)

      // 4) Metric 2: daily registered users (requests hitting 'register').
      val df4: DataFrame = spark.sql(
        """
          |select
          | split(access_time,':')[0] as date_str
          | ,sum(if(instr(request_url,'register')==0,0,1)) as uv
          | from tmp_view_log2
          | group by split(access_time,':')[0]
          |""".stripMargin)
      df4.show(50, truncate = false)

      // 5) Metric 3: daily distinct IP count.
      val df5: DataFrame = spark.sql(
        """
          |select
          | date_str,count(ip) as unique_ip_count
          | from(
          | select
          | split(access_time,':')[0] as date_str
          | ,ip
          | from tmp_view_log2
          | group by split(access_time,':')[0],ip
          | )group by date_str
          |""".stripMargin)
      df5.show(50, truncate = false)

      // 6) Metric 4: daily bounce users (single-PV IPs) and bounce rate.
      val df6: DataFrame = spark.sql(
        """
          |with tmp1 as(
          | select
          | split(access_time,':')[0] as date_str
          | ,ip
          | ,count(1) as ip_pv
          | from tmp_view_log2
          | group by split(access_time,':')[0],ip
          |)
          |select
          | date_str
          | ,count(ip) as user_count
          | ,sum(if(ip_pv=1,1,0)) as jump_user_count
          | ,round(sum(if(ip_pv=1,1,0))/count(ip),4) as jump_rate
          | from tmp1
          | group by date_str
          |""".stripMargin)
      df6.show(100, truncate = false)

      // 7) Merge all daily metrics into a single report keyed by date.
      df3.createOrReplaceTempView("result_pv")
      df4.createOrReplaceTempView("result_uv")
      df5.createOrReplaceTempView("result_ip")
      df6.createOrReplaceTempView("result_jump")

      val df7: DataFrame = spark.sql(
        """
          |select
          | t1.date_str
          | ,t1.pv
          | ,t2.uv
          | ,t3.unique_ip_count
          | ,t4.jump_user_count
          | ,t4.jump_rate
          | from result_pv t1
          | join result_uv t2 on t1.date_str=t2.date_str
          | join result_ip t3 on t1.date_str=t3.date_str
          | join result_jump t4 on t1.date_str=t4.date_str
          |""".stripMargin)
      df7.show(100, truncate = false)

      // 8) Persist the merged report to MySQL (table is replaced on each run).
      df7.write
        .mode(SaveMode.Overwrite)
        .format("jdbc")
        .option("url", "jdbc:mysql://node101:3306/")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("dbtable", "db_test.weblog_report")
        .option("user", "root")
        .option("password", "123456")
        .save()
    } finally {
      spark.stop()
    }
  }

}
