package org.shj.spark.sql

import org.apache.spark.sql.SparkSession


/**
 * Demonstrates Parquet partition discovery and schema merging.
 *
 * Writes two DataFrames with partially overlapping schemas into two partition
 * directories (`key=1`, `key=2`) under `data/test_table`, then reads the whole
 * table back with `mergeSchema` enabled so the union of both schemas (plus the
 * inferred `key` partition column) appears in the result.
 *
 * NOTE(original author): this may fail when run purely locally; it was written
 * to run on a Spark cluster with Hadoop support available.
 */
object ParquetTest {
  def main(args: Array[String]): Unit = {
    // appName matches the object name (was "DatasetTest", a copy-paste leftover).
    val ss = SparkSession.builder().appName("ParquetTest").master("local").getOrCreate()

    // This is used to implicitly convert an RDD to a DataFrame (enables .toDF).
    import ss.implicits._

    ss.sparkContext.setLogLevel("WARN")

    try {
      // Create a simple DataFrame, store into a partition directory.
      // mode("overwrite") lets the example re-run without failing on an
      // already-existing output path.
      val squaresDF = ss.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
      squaresDF.write.mode("overwrite").parquet("data/test_table/key=1")

      // Create another DataFrame in a new partition directory,
      // adding a new column ("cube") and dropping an existing one ("square").
      val cubesDF = ss.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cube")
      cubesDF.write.mode("overwrite").parquet("data/test_table/key=2")

      // Read the partitioned table; mergeSchema unions the per-partition schemas.
      val mergedDF = ss.read.option("mergeSchema", "true").parquet("data/test_table")
      mergedDF.printSchema()
    } finally {
      // Release the local Spark context even if a write/read above fails.
      ss.stop()
    }
  }
}