package com.sunzm.spark.sql.hive.calllog

import com.alibaba.fastjson.JSON
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.DateFormatUtils
import org.apache.spark.sql.{Dataset, SparkSession}

/**
 * Creates an external, partitioned Hive table by running DDL through
 * Spark SQL with Hive support enabled.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-06 17:54
 */
object SparkSqlHiveCreateTable {

  /**
   * Entry point: builds a Hive-enabled SparkSession, creates the external
   * `call_db.call_log_record` table if it does not exist, lists the tables
   * in the database, and stops the session.
   */
  def main(args: Array[String]): Unit = {

    // Must be set BEFORE the SparkSession is created so all HDFS access
    // is performed as the "hdfs" user.
    System.setProperty("HADOOP_USER_NAME", "hdfs")

    val warehouseLocation = "/user/hive/warehouse"

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // None of the settings below are needed if the Hive/Hadoop config files
      // (hive-site.xml, core-site.xml, ...) are on the classpath.
      //
      // Hadoop configuration keys must be prefixed with "spark.hadoop." to be
      // propagated to the Hadoop Configuration; a bare "fs.defaultFS" key is
      // stored in the Spark conf but never reaches the HDFS client.
      .config("spark.hadoop.fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Enable dynamic partition support for inserts into partitioned tables.
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // If Spark should manage the metastore itself, leave the URI below unset
      // and configure a MySQL-backed metastore instead (otherwise Derby is used).
      /*.config("hive.metastore.uris", "thrift://hive01.prd.bj.sobot.com:9083")*/
      /* .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
       .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.111.5.98:3306/hive")
       .config("javax.jdo.option.ConnectionUserName", "hive")
       .config("javax.jdo.option.ConnectionPassword", "hive01")*/
      .enableHiveSupport()
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    // Create the table in the call_db database.
    spark.sql("USE call_db")

    // External table: dropping it leaves the parquet files at LOCATION intact.
    // Note: Hive stores column names in lower case regardless of the
    // camelCase spelling used here.
    spark.sql(
      """
        | CREATE EXTERNAL TABLE IF NOT EXISTS `call_log_record`(
        |	`callRecordId` string,
        |	`parentCallRecordId` string,
        |	`appId` string,
        |	`voiceAliyunUrl` string
        |)
        | PARTITIONED BY (`result_date` string)
        | STORED AS parquet
        | LOCATION '/data/hive/calllog/aliyun/'
        | TBLPROPERTIES("parquet.compress"="snappy")
        |""".stripMargin)

    // truncate = false: print full column values instead of clipping at 20 chars.
    spark.sql("show tables").show(10, truncate = false)

    spark.stop()
  }

}
