package com.sunzm.spark.sql.hive.calllog

import org.apache.spark.sql.SparkSession

/**
 *
 * Operate Hive through Spark SQL.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-06 17:54
 */
object SparkSqlHiveCreateTableTest {

  /**
   * Entry point: builds a local Hive-enabled [[SparkSession]], switches to the
   * `call_db` database, lists its tables, and reads one partition of
   * `parquet_table`. The commented-out statements document the one-off DDL/DML
   * (table creation and partition loads) that was run previously.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {

    // Hive warehouse root on the configured default filesystem.
    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // If the cluster already provides Hive configuration files (hive-site.xml etc.),
      // none of the settings below are required.
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support the metastore lives only in memory, so all metadata
      // is lost when the program exits.
      .enableHiveSupport()
      // Enable dynamic partition inserts.
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // SECURITY(review): metastore JDBC credentials are hard-coded in source.
      // Move them to external configuration (hive-site.xml, environment variables,
      // or a secrets manager) before sharing/deploying this code.
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      // Newer Hive versions require relaxing schema verification and allowing
      // DataNucleus to auto-create metastore tables.
      .config("hive.metastore.schema.verification", false)
      .config("datanucleus.schema.autoCreateTables", true)
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    // Create the database (one-off setup, kept for reference).
    //spark.sql("CREATE DATABASE IF NOT EXISTS call_db")

    // Work inside the call_db database.
    spark.sql("USE call_db")

    /*spark.sql(
      """
        | CREATE TABLE IF NOT EXISTS `pq_table`(
        |	`callRecordId` string,
        |	`callFlag` int,
        |	`callDuration` bigint
        |) USING JSON
        | PARTITIONED BY (`result_date` string)
        | LOCATION '/data/hive/calllog/json/'
        |""".stripMargin)*/

    /*spark.sql(
      """
        |CREATE EXTERNAL TABLE IF NOT EXISTS `parquet_table`(
        |      `callRecordId` string,
        |      `callFlag` int,
        |      `callDuration` bigint
        | )
        |  PARTITIONED BY (`result_date` string)
        |  STORED AS parquet
        |  LOCATION '/data/hive/calllog/parquet/'
        |  TBLPROPERTIES("parquet.compress"="snappy")
        |""".stripMargin)*/

    spark.sql("show tables").show(10, truncate = false)

   /* spark.sql(
      """
        |INSERT INTO parquet_table
        | partition (result_date='2021-07-29')
        |SELECT
        |  callRecordId,
        |  callFlag,
        |  callDuration
          FROM json.`G:/data/hive/calllog/json/result_date=2021-07-29/`
        |""".stripMargin)*/

    // Plain literal — the `s` interpolator was unnecessary here (no interpolation).
    spark.sql("SELECT * FROM parquet_table WHERE result_date = '2021-07-29' LIMIT 10")
      .show(10, truncate = false)

    /*spark.sql(
      """
        |INSERT INTO parquet_table
        | partition (result_date)
        |SELECT
        |  'call001' AS callRecordId,
        |  1 AS callFlag,
        |  30 AS callDuration,
        |  '2021-07-29' AS result_date
        |""".stripMargin)*/

    spark.stop()
  }

}
