package com.xzx.spark.tuning.utils

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
 * Minimal Spark-on-Hive smoke test: connects to an external Hive metastore
 * backed by an HA HDFS cluster and lists the available databases.
 *
 * @author xinzhixuan
 * @version 1.0
 * @date 2022-02-26 10:17 AM
 */
object HelloWorld {

  def main(args: Array[String]): Unit = {
    // embeddedHive()
    externalHive()
  }

  /**
   * Creates a [[SparkSession]] against an external Hive metastore on an
   * HA-HDFS cluster, creates the `xzx` database if absent, and prints the
   * database list.
   *
   * All HA-HDFS client settings are passed as `spark.hadoop.*` builder config
   * so they are merged into the Hadoop configuration *before* the session is
   * created — setting them on `sparkContext.hadoopConfiguration` afterwards
   * risks the warehouse path being resolved with the defaults.
   */
  def externalHive(): Unit = {
    // Impersonate "root" for HDFS access; must be set before any Hadoop client code runs.
    System.setProperty("HADOOP_USER_NAME", "root")
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("HelloWorld")
    val sparkSession = SparkSession.builder().config(sparkConf)
      .config("hive.metastore.uris", "thrift://db2.statis.txdev:9083") // external Hive metastore
      .config("spark.sql.warehouse.dir", "hdfs:///user/hive/warehouse") // storage path for new databases/tables
      // HA-HDFS client configuration (spark.hadoop.* is forwarded verbatim,
      // minus the prefix, into the Hadoop Configuration at session creation).
      .config("spark.hadoop.fs.defaultFS", "hdfs://nameservice1")
      .config("spark.hadoop.dfs.nameservices", "nameservice1")
      .config("spark.hadoop.dfs.ha.namenodes.nameservice1", "namenode1,namenode2")
      .config("spark.hadoop.dfs.namenode.rpc-address.nameservice1.namenode1", "db2.statis.txdev:8020")
      .config("spark.hadoop.dfs.namenode.rpc-address.nameservice1.namenode2", "db6.statis.txdev:8020")
      .config("spark.hadoop.dfs.client.failover.proxy.provider.nameservice1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
      .enableHiveSupport() // enable Hive support (metastore-backed catalog)
      .getOrCreate()
    try {
      sparkSession.sql("create database if not exists xzx")
      sparkSession.sql("show databases").show(100)
    } finally {
      // Release the session even if a query fails; the original leaked it on error.
      sparkSession.close()
    }
  }

  /**
   * Creates a plain local [[SparkSession]] (no Hive support enabled) and
   * prints the databases visible in the default in-memory catalog.
   */
  private def embeddedHive(): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("HelloWorld")
    val sparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
    try {
      // sparkSession.sql("create database test")
      sparkSession.sql("show databases").show()
    } finally {
      // Release the session even if the query fails.
      sparkSession.close()
    }
  }
}
