package com.hrt.hudi.spark

import org.apache.hudi.DataSourceWriteOptions
import org.apache.hudi.config.HoodieWriteConfig
import org.apache.hudi.hive.MultiPartKeysValueExtractor
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object HudiSQL {

  /**
   * Entry point: opens a Hive-enabled SparkSession and queries the Hudi
   * table view `infos3_rt` via Spark SQL, printing the result.
   *
   * The `_rt` suffix suggests this is the real-time view of a
   * Merge-On-Read table (see the commented write/Hive-sync options below,
   * which use `MOR_TABLE_TYPE_OPT_VAL`) — confirm against the Hive metastore.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val session: SparkSession = SparkSession.builder()
      .appName("test")
      // Kryo serialization is required by Hudi for record payloads.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Hive support is needed so the Hudi-synced Hive table is visible to Spark SQL.
      .enableHiveSupport()
      // NOTE(review): hard-coded local master — remove and supply via
      // spark-submit when running on a cluster.
      .master("local")
      .getOrCreate()

    try {
      session.sql(
        """
          |select * from infos3_rt
        """.stripMargin).show()
    } finally {
      // Fix: the session was previously never stopped, leaking the
      // SparkContext (and keeping the local JVM alive longer than needed).
      session.stop()
    }

    // --- Reference only: how the Hudi table was originally written and
    // --- synced to Hive. Kept commented out for future re-runs.
//    val df: DataFrame = session.read.json("file:///D:\\2018IDEA_space\\MyHudiCode\\data\\jsondata.json")
//    val df: DataFrame = session.read.json("file:///D:\\2018IDEA_space\\MyHudiCode\\data\\updatedata.json")

//    df.write.format("hudi")
//      .option(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY,DataSourceWriteOptions.MOR_TABLE_TYPE_OPT_VAL)
//      .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY,"id")
//      .option(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY,"loc")
//      .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY,"data_dt")
//      .option("hoodie.upsert.shuffle.parallelism","2")
//      .option("hoodie.insert.shuffle.parallelism","2")
//      .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY,"jdbc:hive2://node1:10000")
//      .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY,"default")
//      .option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY,"infos2")
//      .option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY,"loc")
//      .option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY,"true")
//      .option(DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY,classOf[MultiPartKeysValueExtractor].getName)
//      .option(HoodieWriteConfig.TABLE_NAME,"person_infos3")
//      .mode(SaveMode.Append)
//      .save("/hudi_data/person_infos3")
  }

}
