package com.kylin

import java.text.SimpleDateFormat
import java.util.{Date, Locale}

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}

object PvApp {

  /**
   * Entry point.
   *
   * Reads an Apache access log, normalizes every 10-field record to the
   * pipe-delimited form "ip|yyyy-MM-dd HH|status|traffic", drops records
   * whose traffic field is the "-" placeholder, and computes UV per hour
   * (the PV computation is currently disabled — see the commented call).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("DSApp")
      .master("local[2]")
      .getOrCreate()

    val sc = spark.sparkContext
    // NOTE(review): hard-coded local Windows path — consider taking it from args.
    val inputPath = "C:\\Users\\吉日木图\\Desktop\\14\\日志 - 副本\\access_2013_05_31.log"
    val inputRDD = sc.textFile(inputPath)

    // Parse each valid (10-field) log line into "ip|yyyy-MM-dd HH|code|traffic".
    // mapPartitions builds the two SimpleDateFormat instances once per
    // partition instead of once per record (SimpleDateFormat is neither
    // cheap to construct nor thread-safe, so per-partition is the safe hoist),
    // and lets us split each line only once.
    val splitRDD = inputRDD
      .mapPartitions { lines =>
        // "HH" (0-23), not "hh" (1-12): access-log timestamps are 24-hour.
        val inFormat = new SimpleDateFormat("[dd/MMM/yyyy:HH:mm:ss", Locale.ENGLISH)
        val outFormat = new SimpleDateFormat("yyyy-MM-dd HH")
        lines
          .map(_.split(" ", -1))
          .filter(_.length == 10)
          .map { item =>
            val ip = item(0)
            // item(3) is e.g. "[31/May/2013:17:38:20" — truncate to the hour.
            val time = outFormat.format(inFormat.parse(item(3)))
            val code = item(8)
            val traffic = item(9)
            ip + "|" + time + "|" + code + "|" + traffic
          }
      }

    // Keep only records that carry an actual traffic value
    // (the log uses "-" when no bytes were transferred).
    val filterRDD = splitRDD.filter { line =>
      val item = line.split("\\|", -1)
      !"-".equals(item(3))
    }

    //getPv(filterRDD, spark)
    getUv(filterRDD, spark)

    spark.stop()
  }

  /**
   * Aggregates the traffic field per (hour, ip) and writes the result
   * to the MySQL table `t_log_pv`.
   *
   * NOTE(review): despite the name, `pv` is the SUM of the traffic column
   * (bytes), not a page-view count — confirm the intended semantics.
   *
   * @param rdd   records in the form "ip|yyyy-MM-dd HH|code|traffic"
   * @param spark active session, used for the DataFrame conversion
   */
  def getPv(rdd: RDD[String], spark: SparkSession): Unit = {
    import spark.implicits._
    val df = rdd
      .map(_.split("\\|", -1))
      .groupBy(item => item(1) + "|" + item(0)) // group key: "time|ip"
      .map { case (key, records) =>
        val groupItem = key.split("\\|", -1)
        val time = groupItem(0)
        val ip = groupItem(1)
        // Sum the traffic field (index 3) over the group.
        val pv = records.map(_(3).toInt).sum
        PV(time, ip, pv)
      }
      .toDF()

    val jdbcDF = df.orderBy("time")
    jdbcDF.show()

    // NOTE(review): DB credentials are hard-coded in source — move to
    // configuration / environment before this leaves development.
    jdbcDF.write.format("jdbc").mode(SaveMode.Overwrite).option("driver", "com.mysql.cj.jdbc.Driver").option("url", "jdbc:mysql://106.13.32.254:3306/febs_spark_log_pv?serverTimezone=UTC&characterEncoding=utf8&useUnicode=true&useSSL=false").option("dbtable", "t_log_pv").option("user", "spark_log_pv").option("password", "123456").save()
  }

  /**
   * Computes UV (count of distinct IPs) per hour and writes the result
   * to the MySQL table `t_log_uv`.
   *
   * @param rdd   records in the form "ip|yyyy-MM-dd HH|code|traffic"
   * @param spark active session, used for the DataFrame conversion
   */
  def getUv(rdd: RDD[String], spark: SparkSession): Unit = {
    import spark.implicits._
    val df = rdd
      .map { line =>
        val item = line.split("\\|", -1)
        item(0) + "|" + item(1) // "ip|time"
      }
      .distinct() // one record per (ip, hour) pair
      .groupBy(_.split("\\|")(1)) // group key: the hour
      .map { case (time, ips) => UV(time, ips.size) }
      .toDF()

    val jdbcDF = df.orderBy("time")
    jdbcDF.show()

    // NOTE(review): DB credentials are hard-coded in source — move to
    // configuration / environment before this leaves development.
    jdbcDF.write.format("jdbc").mode(SaveMode.Overwrite).option("driver", "com.mysql.cj.jdbc.Driver").option("url", "jdbc:mysql://106.13.32.254:3306/febs_spark_log_pv?serverTimezone=UTC&characterEncoding=utf8&useUnicode=true&useSSL=false").option("dbtable", "t_log_uv").option("user", "spark_log_pv").option("password", "123456").save()
  }

  /** One row of the per-hour, per-ip traffic aggregate. */
  final case class PV(time: String, ip: String, pv: Int)

  /** One row of the per-hour unique-visitor count. */
  final case class UV(time: String, uv: Int)

}
