package com.sunzm.spark.sql.table

import org.apache.spark.sql.{Encoders, SparkSession}

/**
 * Demo of Spark SQL table operations against a Hive metastore:
 * creating a partitioned/bucketed table, inserting into a dynamic
 * partition, querying, and inspecting built-in SQL functions.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-23 13:56
 */
object SparkSqlTableDemo {

  /**
   * Entry point: builds a local SparkSession with Hive support backed by a
   * MySQL metastore, then runs the currently enabled demo method(s).
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // If the cluster already provides Hive configuration files (hive-site.xml),
      // none of the settings below are required.
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support, table metadata lives only in memory and is lost on exit.
      .enableHiveSupport()
      // Enable dynamic-partition inserts (used by insertData).
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      // SECURITY: metastore credentials are hard-coded in source. Move them to
      // hive-site.xml or environment configuration instead of committing them.
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      // Newer Hive versions require the two settings below.
      .config("hive.metastore.schema.verification", false)
      .config("datanucleus.schema.autoCreateTables", true)
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    //createTable(spark)

    //insertData(spark)

    //functionDemo(spark)

    selectDemo(spark)

    spark.stop()
  }

  /** Simple record used to build the in-memory test dataset. */
  case class Person(name: String, age: Int)

  /**
   * Registers a small in-memory dataset as a temp view and runs a query
   * demonstrating the built-in `parse_url` function.
   */
  def selectDemo(spark: SparkSession): Unit = {

    // Prepare test data: (name, age)
    val seq = Seq(
      Person("Zen Hui", 25),
      Person("Anil B", 18),
      Person("Shone S", 16),
      Person("Mike A", 25),
      Person("John A", 18),
      Person("Jack N", 16)
    )

    import spark.implicits._

    spark.createDataset(seq)
      .createOrReplaceTempView("person")

    spark.sql(
      """
        |
        |SELECT parse_url('http://spark.apache.org/path?query=1', 'HOST') AS host
        |""".stripMargin)
      .show(10, false)
  }

  /**
   * Demonstrates how to list and inspect Spark SQL built-in functions.
   */
  def functionDemo(spark: SparkSession): Unit = {
    // List all registered functions.
    spark.sql("SHOW FUNCTIONS").show(10, false)

    // List all functions whose name contains "null".
    spark.sql("SHOW FUNCTIONS LIKE '*null*'").show(10, false)

    // Show the description of a single function.
    // NOTE: `DESC FUNCTION` takes a function identifier; the original
    // `DESC FUNCTION LATERAL VIEW` fails to parse because LATERAL VIEW is a
    // query clause, not a function. Describe a real function instead.
    spark.sql("DESC FUNCTION parse_url").show(10, false)
  }

  /**
   * Demonstrates inserting JSON data into a static partition of `call_log`
   * (insert currently commented out), then reads back a sample.
   */
  def insertData(spark: SparkSession): Unit = {

    val resultDate = "2021-07-23"

    /*spark.sql(
      s"""
         |INSERT INTO call_log
         | PARTITION(resultDate = '${resultDate}')
         | SELECT  callRecordId,
         | parentCallRecordId,
         | companyId,
         | staffId,
         | caller,
         | callee,
         | direction,
         | callWay,
         | callFlag,
         | startTime,
         | callDuration,
         | feeDuration
         | FROM json.`data/spark/sql/table/call-tmp-result.dat`
         |""".stripMargin)*/

    spark.sql("SELECT * FROM call_log LIMIT 10").show(10, false)
  }

  /**
   * Drops and recreates the `call_log` table: a JSON data-source table
   * partitioned by `resultDate` and bucketed by `companyId` (sorted by
   * `startTime`) into 10 buckets, stored at an explicit location.
   */
  def createTable(spark: SparkSession): Unit = {

    spark.sql("SHOW DATABASES").show()

    //spark.sql("USE calllog")

    spark.sql("DROP TABLE IF EXISTS call_log")

    spark.sql(
      s"""
         |CREATE TABLE IF NOT EXISTS call_log(
         | callRecordId string COMMENT 'call id',
         | parentCallRecordId string COMMENT 'parent call id',
         | companyId string COMMENT 'company id',
         | staffId string COMMENT 'agent id',
         | caller string COMMENT 'caller number',
         | callee string COMMENT 'callee number',
         | direction int COMMENT 'call direction',
         | callWay int COMMENT 'answer mode',
         | callFlag int COMMENT 'answer flag: 1 answered, 0 unanswered',
         | startTime bigint COMMENT 'call start time',
         | callDuration bigint COMMENT 'call duration',
         | feeDuration bigint COMMENT 'billed duration',
         | resultDate string COMMENT 'date, yyyy-MM-dd'
         | ) USING JSON
         |  PARTITIONED BY (resultDate)
         |  CLUSTERED BY (companyId)
         |      SORTED BY (startTime ASC)
         |  INTO 10 BUCKETS
         |  LOCATION '/data/hive/call/aliyun/'
         |  COMMENT 'call log table'
         |""".stripMargin)

    spark.sql("show tables").show(10, false)
    spark.sql("show create table call_log").show(10, false)
  }
}
