package com.sunzm.spark.sql.hive.report.dws

import org.apache.spark.sql.SparkSession

/**
 * User-profile metric computations.
 *
 * Demonstrates two complementary Spark SQL reshaping techniques:
 *  - PIVOT: turning rows into columns (see [[everyEventTimes]])
 *  - LATERAL VIEW EXPLODE: turning columns/collections into rows
 *    (see [[pvAndSessionCount]] and [[lateralviewexplode]])
 */
object DWSUserProfile {
  def main(args: Array[String]): Unit = {
    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    val spark: SparkSession = SparkSession
      .builder()
      // Strip the trailing '$' that Scala appends to companion-object class names.
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // If the cluster already ships Hive configuration files, none of the settings below are needed.
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support the metastore lives only in memory and is lost when the program exits.
      .enableHiveSupport()
      // Enable dynamic-partition inserts.
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // SECURITY NOTE(review): plaintext metastore credentials are hard-coded in source.
      // Move them into hive-site.xml or externalized, environment-specific configuration.
      // NOTE(review): javax.jdo.* options set programmatically may be ignored if a
      // hive-site.xml is present on the classpath — confirm in the target environment.
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    // Count occurrences of each event; demonstrates rows -> columns (PIVOT).
    //everyEventTimes(spark)

    // Compute PV and session counts; demonstrates columns -> rows (unpivot).
    //pvAndSessionCount(spark)

    lateralviewexplode(spark)

    spark.stop()
  }

  /**
   * Counts how many times each event occurred, and demonstrates
   * pivoting rows into columns with the SQL PIVOT clause.
   *
   * @param spark the active [[SparkSession]]
   */
  def everyEventTimes(spark: SparkSession): Unit = {
    // Needed for the Encoder used by createDataset below.
    import spark.implicits._

    // Prepare test data.
    val seq = Seq(
      // user id, event id, timestamp
      (1, "a", 1),
      (1, "a", 3),
      (1, "c", 5),
      (1, "b", 10),
      (1, "d", 20),
      (1, "a", 50),
      (1, "c", 60)
    )

    spark.createDataset(seq)
      .toDF("userId", "eventId", "ts")
      .createOrReplaceTempView("v_app_event_detail")

    // Count the occurrences of each event.
    spark.sql(
      """
        | SELECT
        |   eventId,
        |   COUNT(*) AS times
        | FROM v_app_event_detail
        |   GROUP BY eventId
        |""".stripMargin)
      .createOrReplaceTempView("v_tmp")
    //.show(10, false)

    /**
     * Result:
     * +-------+-----+
     * |eventId|times|
     * +-------+-----+
     * |b      |1    |
     * |a      |3    |
     * |c      |2    |
     * |d      |1    |
     * +-------+-----+
     */

    // The PIVOT clause performs data pivoting: it aggregates values keyed by a
    // specific column's values, turning those values into columns of the SELECT output.
    // PIVOT (
    //    aggregate_expression
    //    FOR column_list IN ( expression_list )
    // )
    // The SQL below turns the 4 rows above into 4 columns:
    /**
     * +-------+-------+-------+-------+
     * |a_times|b_times|c_times|d_times|
     * +-------+-------+-------+-------+
     * |3      |1      |2      |1      |
     * +-------+-------+-------+-------+
     */
    spark.sql(
      """
        |SELECT * FROM v_tmp
        |    PIVOT (
        |       SUM(times) AS times
        |       FOR eventId IN (
        |         'a' AS a_times,
        |         'b' AS b_times,
        |         'c' AS c_times,
        |         'd' AS d_times)
        |    )
        |""".stripMargin)
      .show(10, false)

  }

  /**
   * Computes page-view (PV) and session counts per user, and demonstrates
   * unpivoting columns into rows via map() + LATERAL VIEW EXPLODE.
   *
   * @param spark the active [[SparkSession]]
   */
  def pvAndSessionCount(spark: SparkSession): Unit = {
    // Needed for the Encoder used by createDataset below.
    import spark.implicits._

    // Prepare test data.
    val seq = Seq(
      // user id, session id, timestamp, page URL
      (1, "s101", 1, "baidu"),
      (1, "s101", 5, "tengxun"),
      (1, "s101", 10, "taobao"),
      (1, "s102", 8, "aiqiyi"),
      (2, "s201", 20, "aiqiyi")
    )

    spark.createDataset(seq)
      .toDF("user_id", "session_id", "ts", "page_url")
      .createOrReplaceTempView("v_app_event_detail")

    // Compute each user's PV count and session count.
    /**
     * +-------+--------+-------------+
     * |user_id|pv_count|session_count|
     * +-------+--------+-------------+
     * |1      |4       |2            |
     * |2      |1       |1            |
     * +-------+--------+-------------+
     */
    spark.sql(
      """
        |SELECT
        |   user_id,
        |   COUNT(1) AS pv_count,
        |   COUNT(DISTINCT session_id) AS session_count
        |FROM v_app_event_detail
        |   GROUP BY user_id
        |""".stripMargin)
      //.show(10, false)
      .createOrReplaceTempView("v_tmp")

    // Turn the 2 columns pv_count and session_count above into 2 rows.
    // LATERAL VIEW applies a UDTF to every row, producing a virtual table.
    /**
     * +-------+-------------+---------+
     * |user_id|tag_name     |tag_value|
     * +-------+-------------+---------+
     * |1      |pv_count     |4        |
     * |1      |session_count|2        |
     * |2      |pv_count     |1        |
     * |2      |session_count|1        |
     * +-------+-------------+---------+
     */
    // LATERAL VIEW behaves somewhat like a Cartesian product.
    /**
     * For example, given:
     * 1,[张三,李四,王五]
     *
     * SELECT
     *    userId,
     *    userName
     * FROM t LATERAL VIEW EXPLODE(names) exp_name AS userName
     *
     * yields:
     * 1,张三
     * 1,李四
     * 1,王五
     */
    spark.sql(
      """
        | SELECT
        |   user_id,
        |   tag_name,
        |   tag_value
        |  FROM
        | (SELECT
        |   user_id,
        |   map(
        |     'pv_count', pv_count,
        |     'session_count', session_count
        |   ) kvs
        |FROM v_tmp) t
        |   LATERAL VIEW EXPLODE(kvs) AS tag_name, tag_value
        |""".stripMargin)
      .show(10, false)
  }

  /**
   * Demonstrates LATERAL VIEW EXPLODE on both array and map columns.
   *
   * @param spark the active [[SparkSession]]
   */
  def lateralviewexplode(spark: SparkSession): Unit = {

    // Demonstrate exploding an array.
    spark.sql(
      """
        |SELECT array('zhangsan','lisi','wangwu') AS names
        |""".stripMargin)
      //.show(10, false)
      .createOrReplaceTempView("v_user")

    // An array of 3 elements becomes 3 rows; "AS name" sets the output column name.
    spark.sql(
      """
        |SELECT EXPLODE(array('zhangsan','lisi','wangwu')) AS name
        |""".stripMargin)
      .show(10, false)

    // LATERAL VIEW: applies a UDTF to each row, producing a lateral (side) virtual table.
    // Usage: LATERAL VIEW udtf(expression) tableAlias AS columnAlias
    spark.sql(
      """
        | SELECT
        |   name
        | FROM v_user LATERAL VIEW
        |     EXPLODE(names) exp_name
        | AS name
        |""".stripMargin)
      .show(10, false)


    // Demonstrate exploding a map.
    // Prepare test data.
    val mapDF = spark.sql(
      """
        | SELECT
        |   '张三' AS name,
        |   map(
        |	     'age', 17,
        |      'addr', '北京'
        |    ) AS kvs
        |
        |  UNION ALL
        |
        |   SELECT
        |   '李四' AS name,
        |   map(
        |	     'age', 18,
        |      'addr', '上海'
        |    ) AS kvs
        |""".stripMargin)
    mapDF.show(10, false)
    mapDF.createOrReplaceTempView("v_map_data")
    // Each map entry becomes one row of (key, value).
    spark.sql(
      """
        |SELECT
        |	name,
        |	new_key,
        |	new_value
        |FROM
        | v_map_data
        | LATERAL VIEW EXPLODE(kvs) AS new_key, new_value
        |""".stripMargin)
      .show(10, false)
  }
}
