package Hudi_SelfStudy.spark_shell_study

import org.apache.spark.sql.{Dataset, Row, SparkSession}

/** Demo job: read a Hudi copy-on-write table with Spark SQL and print its rows. */
object query {

  /** Entry point.
   *
   *  @param args optional; if a first argument is supplied it overrides the
   *              default HDFS path of the Hudi table to read.
   */
  def main(args: Array[String]): Unit = {
    // Build a SparkSession configured for Hudi integration:
    //  - Kryo serialization is required by Hudi for record shuffling
    //  - the Hudi session extension enables Hudi-specific SQL support
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("查看数据")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Ensure the session is always released, even when reading or
    // querying fails (the original leaked it on any exception).
    try {
      // Hudi table location; the first CLI argument, if present, overrides
      // the default so the job is reusable across tables.
      val dataPath = args.headOption.getOrElse("hdfs://192.168.40.110:9000/user/hudi/hudi_trips_cow/")

      // Load the Hudi table as a DataFrame.
      val data: Dataset[Row] = spark.read.format("hudi")
        .load(dataPath)

      // Register a temporary view so the data can be queried with plain SQL.
      data.createOrReplaceTempView("temp01")

      // Query the view and print the result to stdout.
      spark.sql("select * from temp01").show()
    } finally {
      spark.close()
    }
  }

}
