package com.imooc.spark

import com.imooc.spark.DataFrameRDDApp.SetLogger
import org.apache.spark.sql.SparkSession

/**
  * Created by zghgchao 2017/12/24 9:06
  * Parquet file read/write operations with Spark SQL.
  */
object ParquetApp {

  /**
    * Entry point: reads a Parquet file into a DataFrame, prints its schema and
    * contents, then writes a projection of it out as JSON.
    *
    * @param args optional overrides: args(0) = input Parquet path,
    *             args(1) = output JSON path. Defaults preserve the original
    *             hard-coded local paths, so existing invocations still work.
    */
  def main(args: Array[String]): Unit = {
    SetLogger()
    val spark = SparkSession.builder()
      .master("local[2]")
      // was "DataFrameRDDApp" — copy-paste leftover from another example; name this app correctly
      .appName("ParquetApp")
      .getOrCreate()

    // Input path: first CLI arg if given, otherwise the original demo path.
    val path = args.headOption.getOrElse(
      "file:////home/hadoop/app/spark-2.2.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/users.parquet")

    /**
      * spark.read.format("parquet").load(path) is the canonical form;
      * spark.read.load(path) also works because parquet is Spark's default data source.
      * spark.read.parquet(path) below is the convenience shortcut for the same thing.
      */
    val userDF = spark.read.parquet(path)
    //  equivalent: spark.read.format("parquet").option("path", path).load()

    userDF.printSchema()
    userDF.show()

    userDF.select("name", "favorite_color").show()

    // Output path: second CLI arg if given, otherwise the original demo path.
    val outputPath = args.lift(1).getOrElse("file:////home/hadoop/tmp/userJson")

    // mode("overwrite") makes the example re-runnable; without it the second run
    // fails with "path already exists".
    userDF.select("name", "favorite_color").write.mode("overwrite").json(outputPath)
    //  equivalent: ...write.format("json").save(outputPath)

    spark.stop()
  }

  /**
    * spark-sql usage notes:
    * ./bin/spark-sql.sh --master local[2]
    *     # note the USING clause
    *     CREATE TEMPORARY VIEW parquetTable
    *     USING org.apache.spark.sql.parquet
    *     OPTIONS (
    *       path "/home/hadoop/app/spark-2.1.0-bin-2.6.0-cdh5.7.0/examples/src/main/resources/users.parquet"
    *     )
    *
    *     SELECT * FROM parquetTable
    *
    */
}
