package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * Demo: writing a DataFrame out in Parquet format.
  *
  * Parquet is a columnar format that embeds the column names and types
  * in the file itself and compresses the data; it also integrates
  * cleanly with Hive.
  */
object Demo7parquet {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local")
      // Demo runs locally on a tiny dataset; one shuffle partition avoids
      // spawning 200 near-empty tasks (the default).
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // CSV carries no schema, so declare column names/types explicitly.
    val studentDF: DataFrame = spark.read
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .csv("spark/data/students.txt")

    // Save the DataFrame in Parquet format.
    // BUG FIX: previously this called .csv(...), which wrote plain CSV into
    // "spark/data/parquet" — contradicting the demo's purpose and breaking
    // the parquet read-back below. Use .parquet(...) as intended.
    studentDF
      .write
      .mode(SaveMode.Overwrite)
      .parquet("spark/data/parquet")


  /*  // Read the parquet files back; schema is recovered automatically.
    val parquetDF: DataFrame = spark.read.parquet("spark/data/parquet")

    parquetDF.show()
    parquetDF.printSchema()
*/

  }
}
