package hudi


import org.apache.hudi.QuickstartUtils._

import scala.collection.JavaConversions._
import org.apache.spark.sql.SaveMode._
import org.apache.hudi.DataSourceReadOptions._
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.common.model.{HoodieRecord, HoodieRecordPayload}
import org.apache.hudi.config.HoodieWriteConfig
import org.apache.hudi.config.HoodieWriteConfig._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}

/**
 * Official Apache Hudi quickstart example.
 *
 * Equivalent spark-shell invocation:

spark-shell \
  --packages org.apache.hudi:hudi-spark-bundle_2.11:0.9.0,org.apache.spark:spark-avro_2.11:2.4.8 \
  --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
 *
 */
object hudi01_test {

  // Target Hudi table name (copy-on-write table).
  val tableName = "hudi_trips_cow"
  // Local filesystem base path where the Hudi table is stored.
  val basePath = "/Users/sevenhong/code/spark_demo/hudi_data_path"
  // Hudi's built-in generator of sample trip records.
  val dataGen = new DataGenerator

  /**
   * Runs the Hudi quickstart end to end: generates sample trip records,
   * writes them to a copy-on-write Hudi table, then reads the table back
   * with a snapshot query and runs two SQL queries over it.
   */
  def main(args: Array[String]): Unit = {
    // Explicit converters instead of the deprecated implicit JavaConversions.
    import scala.collection.JavaConverters._

    val ss: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("hudi_test")
      // Hudi requires Kryo serialization.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    try {
      // Generate 10 sample insert records and convert the returned
      // java.util.List[String] to a Scala Seq explicitly with .asScala.
      val inserts = convertToStringList(dataGen.generateInserts(10)).asScala

      // Parse the JSON strings into a DataFrame (2 partitions).
      val df = ss.read.json(ss.sparkContext.parallelize(inserts, 2))

      /**
       * Each generated record looks like:
       * {
       *   "ts": 1631963816332,
       *   "uuid": "2b49633e-685e-434e-8500-52f544f4862b",
       *   "rider": "rider-213",
       *   "driver": "driver-213",
       *   "begin_lat": 0.4726905879569653,
       *   "begin_lon": 0.46157858450465483,
       *   "end_lat": 0.754803407008858,
       *   "end_lon": 0.9671159942018241,
       *   "fare": 34.158284716382845,
       *   "partitionpath": "americas/brazil/sao_paulo"
       * }
       */
      // Overwrite so repeated runs don't fail on an existing output directory.
      df.write.mode(SaveMode.Overwrite).json("/Users/sevenhong/code/spark_demo/json")

      // Write the records into the Hudi COW table.
      df.write.format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(PRECOMBINE_FIELD.key(), "ts")            // field used to pick the latest version of a key
        .option(RECORDKEY_FIELD.key(), "uuid")           // unique record key
        .option(PARTITIONPATH_FIELD.key(), "partitionpath")
        .option(TBL_NAME.key(), tableName)
        .mode(SaveMode.Overwrite)
        .save(basePath)

      // Snapshot query: load(basePath) relies on the
      // "/partitionKey=partitionValue" folder structure for Spark's
      // automatic partition discovery.
      val tripsSnapshotDF = ss
        .read
        .format("hudi")
        .load(basePath)

      tripsSnapshotDF.createOrReplaceTempView("hudi_trips_snapshot")

      ss.sql("select fare, begin_lon, begin_lat, ts from  hudi_trips_snapshot where fare > 20.0").show()
      ss.sql("select * from hudi_trips_snapshot").show()

      /**
       * Hudi metadata columns added to every row, e.g.:
       *   _hoodie_commit_time       20210925140542
       *   _hoodie_commit_seqno      20210925140542_1_5
       *   _hoodie_record_key
       *   _hoodie_partition_path
       *   _hoodie_file_name
       */
    } finally {
      // Always release the SparkSession, even if a write/read step fails.
      ss.stop()
    }
  }

}
