package Hudi_SelfStudy.spark_sql_study

import org.apache.spark.sql.SparkSession

object study_02 {

  /**
   * Creates a Hudi copy-on-write (COW) partitioned external table via Spark SQL,
   * storing its data at an explicit HDFS location.
   *
   * Requires a reachable Hive metastore (database `hudi_study` must exist) and
   * the HDFS namenode at 192.168.40.110:9000.
   */
  def main(args: Array[String]): Unit = {

    // Build a SparkSession with Hudi support:
    // - Kryo serializer is recommended by Hudi for performance.
    // - HoodieSparkSessionExtension enables Hudi-specific SQL (DDL/DML) syntax.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("test")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use hudi_study")

    // Create a COW partitioned table backed by HDFS.
    // Notes:
    // - `partitioned by` is a top-level clause, NOT a table property.
    // - `primaryKey` and `preCombineField` are Hudi-specific tblproperties.
    // - `if not exists` makes the script idempotent: without it, a re-run
    //   fails with TableAlreadyExistsException.
    spark.sql(
      """
        |create table if not exists hudi_cow_pt_tbl(
        |id bigint,
        |name string,
        |ts bigint,
        |dt string,
        |hh string
        |) using hudi
        |tblproperties(
        |type='cow',
        |primaryKey='id',
        |preCombineField='ts'
        |)
        |partitioned by (dt,hh)
        |location 'hdfs://192.168.40.110:9000/user/hudi/hudi_cow_pt_tbl/'
        |""".stripMargin)

    // Release the session and its underlying SparkContext.
    spark.close()

  }

}
