package org.shj.spark.sql

import org.apache.spark.sql.SparkSession
import org.shj.spark.util.Util


object ParquetDemo {

  /** Base directory for the partitioned table written by [[demo2]].
    * NOTE(review): Windows-specific path kept from the original; adjust per environment.
    */
  private val DefaultTableDir = "D:/download/tmp/test_table"

  /** Entry point: builds a local SparkSession, runs both demos, then shuts down. */
  def main(args: Array[String]): Unit = {
    val ss = SparkSession.builder().appName("ParquetDemo").master("local").getOrCreate()
    val sc = ss.sparkContext
    sc.setLogLevel("WARN")

    demo1(ss)
    demo2(ss)

    ss.stop()
  }

  /** Reads a sample Parquet file and prints its schema and rows.
    *
    * @param ss active SparkSession used for reading
    */
  def demo1(ss: SparkSession): Unit = {
    val df = ss.read.parquet(Util.fullPath("users.parquet"))
    df.printSchema()
    df.show()
  }

  /** Demonstrates Parquet schema merging across partition directories.
    *
    * Writes two DataFrames with different columns into `key=1` and `key=2`
    * partition directories, then reads the whole table back with
    * `mergeSchema=true` so the merged schema contains both `square` and
    * `cubes` (plus the partition column `key`).
    *
    * @param ss       active SparkSession
    * @param tableDir base directory of the partitioned table
    *                 (defaults to [[DefaultTableDir]])
    */
  def demo2(ss: SparkSession, tableDir: String = DefaultTableDir): Unit = {
    val sc = ss.sparkContext
    // This is used to implicitly convert an RDD to a DataFrame.
    import ss.implicits._

    // Create a simple DataFrame, store into a partition directory.
    // mode("overwrite") makes the demo re-runnable: without it, a second run
    // fails with "path already exists".
    val squareDf = sc.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
    squareDf.write.mode("overwrite").parquet(s"$tableDir/key=1")

    // Create another DataFrame in a new partition directory,
    // adding a new column and dropping an existing column
    val cubeDf = sc.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cubes")
    cubeDf.write.mode("overwrite").parquet(s"$tableDir/key=2")

    // Read the partitioned table; mergeSchema reconciles the two column sets.
    val mergedDF = ss.read.option("mergeSchema", "true").parquet(tableDir)
    mergedDF.printSchema()
  }
}