package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Demonstrates reading and writing DataFrames in several file formats:
 * CSV (schema supplied explicitly), JSON (schema inferred on read), and
 * Parquet (columnar, compressed, carries its own schema).
 *
 * Side effects: reads `data/students.txt`, overwrites the directories
 * `data/student_json` and `data/student_parquet`, and prints sample
 * rows/schemas to stdout.
 */
object Demo7DataType {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("dsl")
      .master("local")
      // Demo runs locally on a tiny dataset; 1 shuffle partition avoids
      // the default 200 near-empty tasks.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Ensure the session (and its SparkContext) is released even if any
    // read/write below throws.
    try {
      // 1. CSV: plain text has no embedded schema, so we declare it up front
      //    instead of paying for inference (which would also type everything
      //    as STRING without inferSchema).
      val studentDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
        .load("data/students.txt")

      // 2. JSON: write the same data out as JSON. Overwrite makes the demo
      //    re-runnable (the target directory is replaced each run).
      studentDF
        .write
        .format("json")
        .mode(SaveMode.Overwrite)
        .save("data/student_json")

      // Read the JSON back; field names/types are inferred from the records.
      val studentJson: DataFrame = spark
        .read
        .format("json")
        .load("data/student_json")

      studentJson.show()

      // 3. Parquet: columnar, compressed format that stores its own schema,
      //    so no schema needs to be supplied on read.
      studentDF
        .write
        .format("parquet")
        .mode(SaveMode.Overwrite)
        .save("data/student_parquet")

      val studentParquet: DataFrame = spark
        .read
        .format("parquet")
        .load("data/student_parquet")

      // Print the schema recovered from the Parquet footer, then sample rows.
      studentParquet.printSchema()
      studentParquet.show()
    } finally {
      // Original code leaked the session; always stop it on exit.
      spark.stop()
    }
  }
}
