package com.arnold.guide.sparkSql.demo05

import org.apache.spark.sql.SparkSession

/**
  * Created by arnold.zhu on 2017/7/24.
  */
object Demo05 {

  // Single-threaded local SparkSession shared by the demo.
  private val sparkSession: SparkSession = SparkSession.builder().master("local").appName("LearnSparkSQL").getOrCreate()

  /**
    * Demonstrates Parquet schema merging: two partition directories are
    * written with different schemas (value/square vs. value/cube), then read
    * back with `mergeSchema` so the result contains all three data columns
    * plus the inferred partition column `key`.
    */
  def main(args: Array[String]): Unit = {
    import sparkSession.implicits._

    // Create a simple DataFrame, store into a partition directory.
    // mode("overwrite") makes the demo re-runnable: the default save mode
    // (ErrorIfExists) would fail on the second run because the output
    // directory already exists.
    val squaresDF = sparkSession.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
    squaresDF.write.mode("overwrite").parquet("data/test_table/key=1")

    // Create another DataFrame in a new partition directory,
    // adding a new column ("cube") and dropping an existing one ("square").
    val cubesDF = sparkSession.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cube")
    cubesDF.write.mode("overwrite").parquet("data/test_table/key=2")

    // Read the partitioned table; mergeSchema reconciles the differing
    // per-partition Parquet schemas into a single unified schema.
    val mergedDF = sparkSession.read.option("mergeSchema", "true").parquet("data/test_table")
    mergedDF.printSchema()

    // Release the local Spark context and its resources (UI port, threads).
    sparkSession.stop()
  }

}
