package com.guchenbo.spark.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * @author guchenbo
 * @date 2022/4/14
 */
/**
 * Experiments exploring how Spark SQL chooses the number of input partitions
 * for different storage layouts: Hive textfile vs. parquet tables, and CSV
 * data that is flat, partitioned, or bucketed.
 *
 * Each experiment triggers an action (`collect()`) so the input splits show
 * up in the executor logs.
 *
 * @author guchenbo
 * @date 2022/4/14
 */
object DefaultPartition {

  /**
   * Hive table stored as TEXTFILE: Spark reads it through HadoopRDD, so the
   * input splits are plain file splits — one per HDFS block (~128MB), as the
   * log excerpt below shows.
   *
   * @param spark active session with Hive support enabled
   * @return all rows of the table, collected to the driver
   */
  def hiveWithTextfile(spark: SparkSession): Array[Row] = {
    val sql = "select * from turing_monitor.score_test"
    val df: DataFrame = spark.sql(sql)

    /**
     * 22/04/14 16:00:10 INFO HadoopRDD: Input split: hdfs://ark204:8020/user/hive/warehouse/turing_monitor.db/score_test/score.csv:268435456+145344515
     * 22/04/14 16:00:10 INFO HadoopRDD: Input split: hdfs://ark204:8020/user/hive/warehouse/turing_monitor.db/score_test/score.csv:134217728+134217728
     * 22/04/14 16:00:10 INFO HadoopRDD: Input split: hdfs://ark204:8020/user/hive/warehouse/turing_monitor.db/score_test/score.csv:0+134217728
     */

    df.collect()
  }

  /**
   * Hive table stored as parquet: Spark substitutes its native parquet reader
   * (FileScanRDD) for the Hive SerDe unless `convertMetastoreParquet` is
   * disabled. Also probes a partitioned table bucketed with Spark's layout
   * (Hive-style bucketing is not recognized by Spark).
   *
   * Note: only the second query is executed — `spark.sql` is lazy until an
   * action; the first DataFrame is analyzed but never materialized.
   *
   * @param spark active session with Hive support enabled
   * @return all rows of the partitioned/bucketed table, collected to the driver
   */
  def hiveWithParquet(spark: SparkSession): Array[Row] = {
    // Uncomment to force the Hive SerDe read path instead of the native reader:
    // spark.conf.set("spark.sql.hive.convertMetastoreParquet", "false")

    // Hive table, parquet format — native parquet source.
    var sql = "select * from turing_monitor.score_hive_parquet"
    var df: DataFrame = spark.sql(sql)

    // Hive partitioned table, bucketed in Spark's format
    // (Hive-format bucketing would not be recognized by Spark).
    sql = "select * from turing_monitor.string_test_par_bu"
    df = spark.sql(sql)

    // df.show()
    df.collect()
  }

  /** Placeholder: non-partitioned, bucketed layout — not implemented yet. */
  def nonPartitionBucket(spark: SparkSession): Unit = {

  }

  /**
   * CSV experiments: write helpers produce partitioned/bucketed copies of the
   * score data; read helpers observe the resulting partition counts.
   * Enable exactly one helper call at a time (the others stay commented out).
   *
   * @param spark active session
   */
  def csv(spark: SparkSession): Unit = {

    /**
     * Writes a 2-bucket copy of the score data.
     *
     * FIX 1: the source must be read with header=true, otherwise the columns
     * are `_c0`, `_c1`, ... and `bucketBy(2, "type")` fails (no such column).
     * FIX 2: `DataFrameWriter.save` does not support `bucketBy` (it throws
     * AnalysisException); bucketed output must go through `saveAsTable`, so we
     * register a table with an explicit path to keep the files under input/csv.
     */
    def saveBucket(): Unit = {
      val df = spark.read.format("csv").option("header", "true").load("input/csv/score_test.csv")

      df.write.format("csv")
        .bucketBy(2, "type")
        .option("path", "input/csv/bucket")
        .saveAsTable("score_bucket")
    }

    //        saveBucket()

    /** Writes a copy of the score data partitioned by the `ds` column. */
    def savePartition(): Unit = {
      val df = spark.read.format("csv").option("header", "true").load("input/csv/score_test.csv")
      df.write.format("csv").partitionBy("ds").save("input/csv/part")
    }

    //        savePartition()

    /** Reads a flat (non-partitioned, non-bucketed) CSV file. */
    def nonPartitionNonBucket(): Unit = {
      var path = "input/csv/score.csv"
      //      path = "input/csv/nopart"
      val df: DataFrame = spark.read.format("csv").load(path)
      df.collect()
    }

    nonPartitionNonBucket()

    /** Reads the directory written by savePartition() (partitioned, not bucketed). */
    def partitionNonBucket(): Unit = {
      val df: DataFrame = spark.read.format("csv").load("input/csv/part")
      df.collect()
    }

    //    partitionNonBucket()


    //    nonPartitionBucket(spark)
  }

  /**
   * Entry point: builds a local Hive-enabled session and runs one experiment.
   *
   * FIX: the session is now stopped in a `finally` block so executor threads
   * and the metastore connection are released even if an experiment throws.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("demo").master("local[20]")
      .config("hive.metastore.uris", "thrift://ark150:9083").enableHiveSupport().getOrCreate()

    try {
      //        hiveWithTextfile(spark)
      //
      //    hiveWithParquet(spark)

      //    val rdd: RDD[Row] = df.rdd
      //    println(rdd.getNumPartitions)

      //    df.show()

      csv(spark)
    } finally {
      spark.stop()
    }
  }
}
