package SQL

import org.apache.spark.sql.{DataFrameReader, DataFrameWriter, Row, SaveMode, SparkSession}
import org.junit.Test

class ReadWriter {
  // SparkSession shared by every test in this class; local[6] runs with 6 threads.
  val spark: SparkSession = SparkSession.builder()
    .master("local[6]")
    .appName("ReadWriter")
    .getOrCreate()

  /** Demonstrates the two equivalent ways of reading a CSV file. */
  @Test
  def read(): Unit = {
    // Form 1: generic reader with an explicit format string.
    spark.read
      .format("csv")
      .option("header", value = true)
      .option("inferSchema", value = true)
      .load("resource/data.csv")
    // Form 2: format-specific shortcut method.
    spark.read
      .option("header", value = true)
      .option("inferSchema", value = true)
      .csv("resource/data.csv")
  }

  /** Demonstrates the two equivalent ways of writing a DataFrame as JSON. */
  @Test
  def writer(): Unit = {
    // Read the source dataset.
    val df = spark.read
      .format("csv")
      .option("header", value = true)
      .option("inferSchema", value = true)
      .load("resource/data.csv")
    // Form 1: format-specific shortcut. Overwrite is required: the default
    // SaveMode is ErrorIfExists, so without it a rerun (or the second write
    // below to the same path) throws "path already exists".
    df.write
      .mode(SaveMode.Overwrite)
      .json("resource/data.json")

    // Form 2: generic writer with an explicit format. This targets the same
    // path Form 1 just created, so Overwrite is mandatory here.
    df.write
      .format("json")
      .mode(SaveMode.Overwrite)
      .save("resource/data.json")
  }

  /** Round-trips a CSV dataset through the Parquet format. */
  @Test
  def parquet(): Unit = {
    // 1. Read the CSV source.
    val df = spark.read
      .option("header", value = true)
      .csv("resource/data.csv")
    // 2. Rewrite it as Parquet; Overwrite makes the test rerunnable.
    df.write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .save("resource/data1")
    // 3. Read it back — Parquet is Spark's default read format, so no
    //    explicit format() call is needed.
    spark.read
      .load("resource/data1")
  }

  /** Round-trips a CSV dataset through JSON, on disk and in memory. */
  @Test
  def json(): Unit = {
    // 1. Read the CSV source.
    val df = spark.read
      .option("header", value = true)
      .csv("resource/data.csv")
    // 2. Write it out as JSON; Overwrite makes the test rerunnable
    //    (and avoids colliding with the path written by writer()).
    df.write
      .mode(SaveMode.Overwrite)
      .json("resource/data.json")
    // 3. Read the JSON files back.
    spark.read
      .json("resource/data.json")
      .show()
    // 4. Convert the DataFrame to JSON strings in memory. Pass the
    //    Dataset[String] straight to read.json — the RDD[String] overload
    //    (read.json(df.toJSON.rdd)) has been deprecated since Spark 2.0.
    val jsonDs = df.toJSON
    spark.read.json(jsonDs)
  }
}
