package com.pw.study.task

import org.apache.hudi.DataSourceReadOptions._
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.QuickstartUtils.{DataGenerator, _}
import org.apache.hudi.config.HoodieWriteConfig._
import org.apache.spark.SparkConf
import org.apache.spark.sql.SaveMode._
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.JavaConversions._

/**
 * Hudi project demo task: seeds a copy-on-write Hudi table on HDFS with
 * generated trip data, then demonstrates snapshot, time-travel and
 * incremental queries against it.
 *
 * @author linux_future
 * @since 2022/11/6
 **/
object HudiDemoTask {

  /** Root HDFS directory under which the demo Hudi tables are stored. */
  private val HudiRoot = "hdfs://hadoop102:8020/hudi"

  /** Builds the HDFS storage path for the given Hudi table. */
  private def tablePath(tableName: String): String = s"$HudiRoot/$tableName"

  def main(args: Array[String]): Unit = {
    System.setProperty("HADOOP_USER_NAME", "atguigu")
    // Build the SparkSession. Kryo serialization is required by Hudi.
    val sparkConf = new SparkConf()
      .set("spark.testing.memory", "512000000")
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkSession = SparkSession.builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    val tableName = "hudi_trips_cow"
    try {
      // 1. Seed the table with generated sample data.
      initTableData(sparkSession, tableName)

      // 2. Snapshot / time-travel read (kept for reference).
      //val df1: DataFrame = readTableData(sparkSession, tableName)

      // 3. Incremental query demo.
      readTableData2(sparkSession, tableName)
    } finally {
      // BUG FIX: the session was never stopped; release Spark resources
      // even when one of the steps above throws.
      sparkSession.stop()
    }
  }

  /**
   * Demonstrates an incremental query: lists the distinct commit times of the
   * table, then reads every record committed up to the second-most-recent
   * commit (or the only commit, if there is just one).
   *
   * @param spark     active SparkSession
   * @param tableName name of the Hudi table to query
   */
  def readTableData2(spark: SparkSession, tableName: String): Unit = {
    import spark.implicits._

    val basePath = tablePath(tableName)
    val df = spark.read.format("hudi").load(basePath)

    df.createOrReplaceTempView("hudi_trips_sp")
    val dfSelect = spark.sql("select distinct(_hoodie_commit_time) as commitTime from  hudi_trips_sp order by commitTime")
    dfSelect.map(k => k.getString(0)).show()
    val commits = dfSelect
      .map(k => k.getString(0)).take(50)

    if (commits.isEmpty) {
      // Nothing committed yet; an incremental query would be meaningless.
      println(s"Table $tableName has no commits; skipping incremental query")
    } else {
      // "000" sorts before any real commit instant, so the incremental
      // query effectively starts from the beginning of the table.
      val beginTime = "000"
      // BUG FIX: the original indexed commits(commits.length - 2)
      // unconditionally, which throws ArrayIndexOutOfBoundsException when
      // the table has only a single commit. Clamp the index to 0.
      val endTime = commits(math.max(commits.length - 2, 0))

      val dfCommits: DataFrame = spark.read.format("hudi").
        option(QUERY_TYPE.key(), QUERY_TYPE_INCREMENTAL_OPT_VAL).
        option(BEGIN_INSTANTTIME.key(), beginTime).
        option(END_INSTANTTIME.key(), endTime).
        load(basePath)
      dfCommits.show()
    }
  }

  /**
   * Snapshot read of the trip columns, plus a time-travel query that prints
   * the table as it existed at a given commit instant.
   *
   * @param spark       active SparkSession
   * @param tableName   name of the Hudi table to read
   * @param asOfInstant commit instant for the time-travel query; defaults to
   *                    the instant the original demo was recorded with
   * @return the trip columns of the current snapshot
   */
  def readTableData(spark: SparkSession, tableName: String,
                    asOfInstant: String = "20221106213828458"): DataFrame = {
    val basePath = tablePath(tableName)
    val df = spark.read.format("hudi").load(basePath)
    val dfSelect: DataFrame = df.select("begin_lat", "begin_lon", "driver", "end_lat", "end_lon", "fare", "rider", "ts", "uuid")

    // Time-travel query, variant one: read the table as of a specific instant.
    spark.read.
      format("hudi").
      option("as.of.instant", asOfInstant).
      load(basePath).show(20)

    dfSelect
  }

  /**
   * Seeds the Hudi table with randomly generated trip records.
   *
   * @param sparkSession active SparkSession
   * @param tableName    name of the Hudi table to write
   * @param numRecords   number of generated insert records (default 20,
   *                     matching the original behavior)
   */
  def initTableData(sparkSession: SparkSession, tableName: String,
                    numRecords: Int = 20): Unit = {
    val basePath = tablePath(tableName)
    val dataGen = new DataGenerator

    // convertToStringList yields a java.util.List; the (deprecated)
    // JavaConversions import at the top of the file converts it implicitly.
    val inserts = convertToStringList(dataGen.generateInserts(numRecords))
    val df = sparkSession.read.json(sparkSession.sparkContext.parallelize(inserts, 2))

    df.write.format("hudi").
      options(getQuickstartWriteConfigs).
      option(PRECOMBINE_FIELD.key(), "ts").
      option(RECORDKEY_FIELD.key(), "uuid").
      option(PARTITIONPATH_FIELD.key(), "partitionpath").
      option(TBL_NAME.key(), tableName).
      mode(Append).
      save(basePath)
  }
}
