package com.deep.test

import org.apache.hudi.DataSourceReadOptions._
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.QuickstartUtils._
import org.apache.hudi.config.HoodieWriteConfig._
import org.apache.spark
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SaveMode._
import org.apache.spark.sql.SparkSession

import scala.collection.JavaConversions._

object HudiSparkDemo2 {

  /**
   * Demo entry point: generates sample Hudi trip records with the quickstart
   * `DataGenerator`, loads them into a Spark DataFrame, and prints its schema.
   *
   * NOTE: the original code called `sc.json(...)` on a bare `SparkContext`,
   * which does not exist — JSON reading into a DataFrame requires a
   * `SparkSession` (`spark.read.json`). A session is built here with the
   * same master/appName instead.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // SparkSession is the DataFrame-capable entry point; it owns the
    // underlying SparkContext (available as spark.sparkContext).
    val spark = SparkSession.builder()
      .master("local[4]")
      .appName("test")
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    // Hudi target table/path — placeholders for a subsequent Hudi write step
    // (not used by the read below).
    val tableName = "hudi_trips_cow"
    val basePath = "hdfs://localhost:9000/datas/hudi-warehouse/hudi_trips_cow"
    val dataGen = new DataGenerator

    // Prepare sample data: 10 generated insert records serialized as JSON
    // strings (java.util.List converted via the JavaConversions import).
    val inserts = convertToStringList(dataGen.generateInserts(10))

    // Parallelize into 2 partitions and read as a DataFrame.
    val df = spark.read.json(spark.sparkContext.parallelize(inserts, 2))
    df.printSchema()

    spark.stop()
  }

}
