package spark

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Offline analysis of JD (jd.com) user behavior logs with Spark SQL.
 *
 * Reads a CSV access log, registers it as a temp view, and runs several
 * aggregate queries (daily UV/PV, per-URL daily UV/PV), plus a UDF demo.
 */
object SparkJdLogSqlApp {
  def main(args: Array[String]): Unit = {
    // 1. Build the SparkSession entry point (local mode, 2 cores).
    val spark: SparkSession = SparkSession.builder()
      .appName("SparkJdLogSqlApp")
      .master("local[2]")
      // Small local dataset: keep shuffle partitions low to avoid tiny-task overhead.
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()
    import spark.implicits._

    spark.sparkContext.setLogLevel("WARN")

    // 2. Load the comma-separated log with an explicit schema (no header inference).
    val logDF: DataFrame = spark.read
      .option("sep", ",")
      .schema("ip STRING, user_id STRING, url STRING, access_time STRING")
      .csv("zg6-spark-lecture/src/main/resources/jd-behavior.log")
//    logDF.printSchema()
//    logDF.show(10, truncate = false)

    // 3. Register a session-scoped temp view so the data can be queried with SQL.
    logDF.createTempView("tmp_jd_log")
    spark.sql("select * from tmp_jd_log").show(false)

    // 4. Analysis 1: daily unique visitors (UV), sorted descending.
    //    NOTE(review): assumes access_time starts with a 'yyyy-MM-dd' date prefix — confirm with sample data.
    val df4: DataFrame = spark.sql(
      """
        |select
        |  substring(access_time,1,10) AS day_str
        |, count(distinct user_id)     AS uv
        |from tmp_jd_log
        |group by substring(access_time,1,10)
        |order by uv desc
        |""".stripMargin)
    df4.show(false)

    // 5. Analysis 2: daily page views (PV), sorted descending.
    //    (Fixed alias: was mistakenly named 'nv'.)
    val df5: DataFrame = spark.sql(
      """
        |select
        |  substring(access_time,1,10) AS day_str
        |, count(url)                  AS pv
        |from tmp_jd_log
        |group by substring(access_time,1,10)
        |order by pv desc
        |""".stripMargin)
    df5.show(false)

    // 6. Analysis 3: daily unique visitors (UV) per URL, sorted descending.
    //    (Fixed alias: was mistakenly named 'nv'.)
    val df6: DataFrame = spark.sql(
      """
        |select
        |  url
        |, substring(access_time,1,10) AS day_str
        |, count(distinct user_id)     AS uv
        |from tmp_jd_log
        |group by url, substring(access_time,1,10)
        |order by uv desc
        |""".stripMargin)
    df6.show(false)

    // 7. Analysis 4: daily page views (PV) per URL, sorted descending;
    //    optionally persisted to MySQL table result_pv (write left disabled below).
    val df7: DataFrame = spark.sql(
      """
        |select
        |  url
        |, substring(access_time,1,10) AS day_str
        |, count(url)                  AS pv
        |from tmp_jd_log
        |group by url, substring(access_time,1,10)
        |order by pv desc
        |""".stripMargin)
    df7.show(false)

//    df7.write
//      .mode(SaveMode.Overwrite)
//      .format("jdbc")
//      .option("url", "jdbc:mysql://node101:3306/db_test?createDatabaseIfNotExist=true&useSSL=false&useUnicode=true&characterEncoding=UTF-8")
//      .option("dbtable", "db_test.result_pv")
//      .option("user", "root")
//      .option("password", "123456")
//      .save()

    // 8. Register a UDF that extracts the date (first 10 chars) from access_time.
    //    NOTE(review): will throw on rows shorter than 10 chars or null — confirm data quality.
    spark.udf.register(
      "extract_date",
      (accessTime: String) => accessTime.substring(0, 10)
    )

    // 9. Analysis 5: demonstrate the UDF alongside the built-in substring.
    val df9: DataFrame = spark.sql(
      """
        |select
        |  access_time
        |, substring(access_time,1,10)   AS day_str
        |, extract_date(access_time)     AS date_str
        |from tmp_jd_log
        |""".stripMargin)
    df9.show(false)

    // Release Spark resources.
    spark.stop()
  }
}
