package com.study.spark.scala.hive

import org.apache.spark.sql.{SaveMode, SparkSession}

/**
 *
 * @author stephen
 * @date 2019-10-10 09:54
 */
object HiveDemo {

  def main(args: Array[String]): Unit = {
    // 0. Prerequisite: place hive-site.xml under the resources directory so
    //    Spark can locate the Hive metastore.
    // 1. Initialize a SparkSession with Hive support.
    //    NOTE: the dynamic-partition settings use the plain "hive." prefix —
    //    the previous "sql.hive." keys are not recognized by Spark/Hive and
    //    were silently ignored.
    val spark = SparkSession.builder()
      .appName("Hive Demo")
      .master("local[*]")
      .enableHiveSupport()
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("hive.exec.max.dynamic.partitions", 2000)
      .getOrCreate()

    // 3.1 Read from Hive, option one: direct SQL query.
    spark.sql("select * from demo_db.demo_table").limit(10).show()
    // 3.2 Read from Hive, option two: load the table, register a temp view,
    //     then query the view.
    spark.table("demo_db.demo_table").createOrReplaceTempView("tmp_table")
    spark.sql("select * from tmp_table").limit(10).show()
    // 3.3 Read from Hive, option three: a multi-line SQL string.
    val sqlStr =
      """
        | select
        | *
        | from demo_db.demo_table
        | limit 10
      """.stripMargin
    spark.sql(sqlStr).show()

    // 4.1 Write to Hive, option one: read local data and save as a table.
    val df = spark.read.parquet("/parquet/file")
    df.write.mode("append").format("parquet")
      .saveAsTable("demo_db.demo_table")
    // 4.2 Write to Hive, option two: LOAD DATA from a local file.
    //     FIX: the path must be wrapped in ASCII single quotes; the original
    //     used Unicode smart quotes, which is a SQL parse error.
    spark.sql("load data local inpath '/data/xx.csv'" +
      " overwrite into table demo_db.demo_table")
    // 4.3 Write to Hive, option three: insert the result of a query.
    val dataFrame = spark.sql(sqlStr)
    dataFrame.write.insertInto("demo_db.demo_table")

    // 5.1 Write to a partitioned Hive table, option one: static partition via SQL.
    spark.sql("insert into demo_db.table2 partition(date='2015-04-02') select name,age,sex from demo_db.table1")
    // 5.2 Write to a partitioned Hive table, option two: partitionBy + saveAsTable.
    df.write.mode(SaveMode.Append).format("hive").partitionBy("date").saveAsTable("demo_db.table2")
    // 5.3 Write to a partitioned Hive table, option three: insertInto relies on
    //     the target table's existing partitioning (requires dynamic partitioning
    //     enabled — see the config above).
    spark.sql("insert into test_partition select * from temp_table")
    df.write.insertInto("test_partition")

    // insertInto must NOT be combined with partitionBy on the writer —
    // the target table already defines the partition columns.
    // df.write.partitionBy("year").insertInto("test_partition") // throws AnalysisException

    // Release the session and its underlying SparkContext.
    spark.stop()
  }
}
