package com.sugon.hudi

import java.util

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.hudi.QuickstartUtils._

import scala.collection.JavaConversions._
import org.apache.spark.sql.SaveMode._
import org.apache.hudi.DataSourceReadOptions._
import org.apache.hudi.{DataSourceReadOptions, DataSourceWriteOptions}
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.config.{HoodieCompactionConfig, HoodieStorageConfig}
import org.apache.hudi.config.HoodieWriteConfig._
import org.apache.spark.SparkContext


object HDTest {

  /**
    * Entry point: generates 10 synthetic trip "update" records with Hudi's
    * quickstart [[DataGenerator]] and appends them to a local copy-on-write
    * Hudi table at `file:///tmp/hudi_trips_cow`.
    *
    * @param args unused
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName("cs")
      .master("local[*]")
      // Hudi requires Kryo serialization for its internal payload classes.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    val tableName = "hudi_trips_cow"
    val basePath = "file:///tmp/hudi_trips_cow"

    val dataGen = new DataGenerator

    // NOTE(review): DataGenerator.generateUpdates picks record keys from the pool
    // created by a previous generateInserts call on the SAME instance. On a fresh
    // generator the pool is empty and generateUpdates throws. Seed the key pool
    // first; the seed records themselves are discarded, only their keys matter.
    dataGen.generateInserts(10)

    val updates: util.List[String] = convertToStringList(dataGen.generateUpdates(10))

    // Explicit .asScala instead of relying on the deprecated implicit
    // scala.collection.JavaConversions wildcard import at the top of the file.
    import scala.collection.JavaConverters._
    val df: DataFrame = spark.read.json(spark.sparkContext.parallelize(updates.asScala, 2))

    df.write.format("org.apache.hudi").
      options(getQuickstartWriteConfigs).
      // Field used to pick the winner when two records share the same key
      // (the record with the greatest "ts" is kept).
      option(PRECOMBINE_FIELD_OPT_KEY, "ts").
      option(RECORDKEY_FIELD_OPT_KEY, "uuid").
      option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
      option(TABLE_NAME, tableName).
      mode(Append).
      save(basePath)
  }

  /**
    * Generates 10 synthetic insert records and writes them to a Hudi table,
    * overwriting any existing data at `basePath`.
    *
    * Sample of the generated frame (columns: begin_lat, begin_lon, driver,
    * end_lat, end_lon, fare, partitionpath, rider, ts, uuid):
    * {{{
    * |0.4726905879569653|0.46157858450465483|driver-213|...|americas/brazil/s...|rider-213|0.0|3b681170-aae7-4b0...|
    * }}}
    *
    * @param sparkSession active Spark session
    * @param tableName    Hudi table name to register in the commit metadata
    * @param basePath     target path of the Hudi table
    */
  def saveData(sparkSession: SparkSession, tableName: String, basePath: String): Unit = {

    val dataGen = new DataGenerator

    val inserts = convertToStringList(dataGen.generateInserts(10))

    // Explicit conversion; avoids the deprecated implicit JavaConversions.
    import scala.collection.JavaConverters._
    val dfSave = sparkSession.read.json(sparkSession.sparkContext.parallelize(inserts.asScala, 2))

    dfSave.show(10)

    dfSave.write.format("hudi").
      options(getQuickstartWriteConfigs).
      // Field used in pre-combining before the actual write: when two records
      // have the same key, Object.compareTo(..) on this field picks the record
      // with the greatest value.
      option(PRECOMBINE_FIELD_OPT_KEY, "ts").
      option(RECORDKEY_FIELD_OPT_KEY, "uuid").
      option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
      option(TABLE_NAME, tableName).
      //      option(HoodieCompactionConfig.PARQUET_SMALL_FILE_LIMIT_BYTES, String.valueOf(5 * 1024)).
      //      option(HoodieStorageConfig.PARQUET_FILE_MAX_BYTES, String.valueOf(10 * 1024)).
      mode(Overwrite).
      save(basePath)
  }

  /**
    * Reads the Hudi table stored on the local filesystem and prints its
    * contents plus a row count.
    *
    * @param sparkSession active Spark session
    * @param basePath     root path of the Hudi table
    */
  def queryData(sparkSession: SparkSession, basePath: String) = {

    val roViewDF: DataFrame = sparkSession.
      read.
      format("org.apache.hudi").
      // Glob over the 3-level partition layout; load(basePath) alone works when
      // partitions use the "/partitionKey=partitionValue" naming convention,
      // which Spark auto-discovers.
      load(basePath + "/*/*/*/*")

    roViewDF.show()
    println("count:" + roViewDF.count())
  }
}
