package com.jinghang.spark_base._030_SQLDataSource

import org.apache.spark.sql.SparkSession

/**
 * Basic Parquet round-trip example: reads JSON, writes it out as Parquet,
 * reads the Parquet back, and queries it through a temporary SQL view.
 */
object _020_BasicParquetExample {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("Spark SQL data sources example")
      .master("local[1]")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()
    // Silence Spark's verbose INFO/WARN logging so the example output is readable.
    spark.sparkContext.setLogLevel("ERROR")
    try {
      runBasicParquetExample(spark)
    } finally {
      // Always release the SparkContext and its resources, even if the example fails.
      spark.stop()
    }
  }

  /**
   * Writes `people.json` as a Parquet file, reads it back, and runs a SQL
   * query over the reloaded data via a temporary view.
   *
   * @param spark the active [[SparkSession]] used for all reads and writes
   */
  private def runBasicParquetExample(spark: SparkSession): Unit = {

    val peopleDF = spark.read.json("data/practiceOperator/people.json")

    // DataFrames can be saved as Parquet files, maintaining the schema information.
    // Use overwrite mode so re-running the example does not fail with
    // "path already exists" (the default SaveMode is ErrorIfExists).
    peopleDF.write.mode("overwrite").parquet("data/out/people.parquet")

    // Parquet is self-describing: the schema is recovered from the file itself.
    val parquetFileDF = spark.read.parquet("data/out/people.parquet")

    // Register a session-scoped view so the DataFrame can be queried with SQL.
    parquetFileDF.createOrReplaceTempView("parquetFile")
    val namesDF = spark.sql("SELECT name FROM parquetFile WHERE age BETWEEN 13 AND 19")

    // Needed for the implicit Encoder that backs Dataset.map on String results.
    import spark.implicits._
    namesDF.map(attributes => "Name: " + attributes(0)).show()
    // +------------+
    // |       value|
    // +------------+
    // |Name: Justin|
    // +------------+
  }

}
