package com.xiaoxu.spark.ExternalDataSource

import scala.util.control.NonFatal

import org.apache.spark.sql.SparkSession

/**
  * Demo of reading Parquet files (a columnar storage format) with Spark SQL.
  *
  * Shows: the standard `format("parquet").load(path)` read, writing a
  * projection back out as JSON, relying on parquet being Spark SQL's default
  * format, and passing the path via `option("path")`.
  *
  * Can also be run interactively in the VM shell:
  *   ./spark-sql --master local[2] --jars ~/software/mysqlDriver
  */
object ParquetFileDemo {
  def main(args: Array[String]): Unit = {

    // App name matches the object name (was copy-pasted "SparkSessionApp").
    val spark = SparkSession.builder().appName("ParquetFileDemo")
      .master("local[2]").getOrCreate()

    try {
      /**
        * spark.read.format("parquet").load(path) is the standard way to read.
        */
      val userDF = spark
        .read
        .format("parquet")
        .load("file:///home/hadoop/app/spark-2.1.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/users.parquet")

      userDF.printSchema()
      userDF.show()

      userDF.select("name", "favorite_color").show()

      // Write the projection out in JSON format.
      userDF
        .select("name", "favorite_color")
        .write.format("json")
        .save("file:///home/hadoop/tmp/jsonout")

      // format(...) can be omitted: Spark SQL's default source is parquet.
      spark
        .read
        .load("file:///home/hadoop/app/spark-2.1.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/users.parquet")
        .show()

      // This read is EXPECTED to fail: the file is JSON but no format is
      // given, so Spark tries to parse it as parquet. Caught here so the
      // rest of the demo still runs and the session is stopped cleanly
      // (previously the uncaught exception aborted main before stop()).
      try {
        spark
          .read
          .load("file:///home/hadoop/app/spark-2.1.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/people.json")
          .show()
      } catch {
        case NonFatal(e) =>
          println(s"Expected failure: reading JSON as parquet -> ${e.getMessage}")
      }

      // Equivalent read: supply the path via option("path"), then load() with
      // no arguments.
      spark
        .read
        .format("parquet")
        .option("path", "file:///home/hadoop/app/spark-2.1.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/users.parquet")
        .load().show()

      /**
        * Spark SQL "USING" syntax for creating a temporary view over a file:
        * docs: http://spark.apache.org/docs/latest/sql-data-sources-parquet.html#loading-data-programmatically
        *
        * CREATE TEMPORARY VIEW parquetTable
        * USING org.apache.spark.sql.parquet
        * OPTIONS (
        *   path "/home/hadoop/app/spark-2.1.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/users.parquet"
        * )
        */
    } finally {
      // Always release the SparkSession, even if a read above throws.
      spark.stop()
    }
  }
}
