import org.apache.spark.sql.{DataFrameReader, SaveMode, SparkSession}
import org.junit.Test

/**
 * JUnit tests demonstrating the Spark SQL DataFrame reader/writer APIs:
 * CSV and JSON round trips, Parquet with save modes, and partition discovery.
 *
 * NOTE(review): all tests read/write hard-coded Windows desktop paths and
 * need local input files (`A.csv`, `aaa.csv`) to exist — they are demos,
 * not self-contained unit tests.
 */
class ReadWriter {

  /** Shows the two equivalent ways to read a CSV file: the generic
    * format/load API and the `csv()` shorthand. */
  @Test
  def reader(): Unit = {
    // 1. Create the SparkSession
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    // First form: generic reader with an explicit format
    spark.read
      .option("header", value = true)
      .format("csv")
      // Infer column types from the data instead of reading everything as string
      .option("inferSchema", value = true)
      .load("C:\\Users\\HR\\Desktop\\A.csv")
      .show(10)
    // Second form: format-specific shorthand
    spark.read
      .option("header", value = true)
      // Infer column types from the data
      .option("inferSchema", value = true)
      .csv("C:\\Users\\HR\\Desktop\\A.csv")
  }

  /** Reads a CSV dataset and writes it out as JSON, using both the
    * `json()` shorthand and the generic format/save API. */
  @Test
  def writer(): Unit = {
    // 1. Create the SparkSession
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    // 2. Read the dataset
    val df = spark.read
      .option("header", value = true)
      .csv("C:\\Users\\HR\\Desktop\\aaa.csv")
    // 3. Write the dataset.
    // Overwrite matches the other tests in this class; without it the default
    // SaveMode.ErrorIfExists makes the test fail on every re-run once the
    // output directories exist.
    df.write
      .mode(SaveMode.Overwrite)
      .json("C:\\Users\\HR\\Desktop\\aaa.json")
    df.write
      .format("json")
      .mode(SaveMode.Overwrite)
      .save("C:\\Users\\HR\\Desktop\\aaa2.json")
  }

  /** Converts a CSV dataset to Parquet, then reads it back. Parquet is
    * the default format for `load`, so no explicit format is needed on read. */
  @Test
  def parquet(): Unit = {
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    // Read the CSV data
    val df = spark.read.option("header", value = true).csv("C:\\Users\\HR\\Desktop\\A.csv")
    // Write the data as Parquet
    df.write
      .format("parquet")
      // Overwrite any existing output
      .mode(SaveMode.Overwrite)
      // Append instead:
      //.mode(SaveMode.Append)
      .save("C:\\Users\\HR\\Desktop\\a")
    // Read the Parquet back (Parquet is the default source for load)
    spark.read
      .load("C:\\Users\\HR\\Desktop\\a")
      .show()
  }

  /** Writes a dataset partitioned by the "Year" and "month" columns and
    * reads one partition back directly by its directory path. */
  @Test
  def partition(): Unit = {
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    // 1. Read the data
    val df = spark.read
      .option("header", value = true)
      .csv("C:\\Users\\HR\\Desktop\\A.csv")
    // 2. Write the file partitioned by Year/month.
    // NOTE(review): partitionBy requires the CSV to actually contain
    // "Year" and "month" columns — verify against the input file.
    df.write
      .partitionBy("Year", "month")
      .mode(SaveMode.Overwrite)
      .save("C:\\Users\\HR\\Desktop\\abc")
    // 3. Read one partition directory; partition values come from the path
    spark.read
      .parquet("C:\\Users\\HR\\Desktop\\abc/Year=2010/month=1")
      .show()
  }

  /** CSV-to-JSON round trip: writes the dataset as JSON and reads it back. */
  @Test
  def json(): Unit = {
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    val df = spark.read
      .option("header", value = true)
      .csv("C:\\Users\\HR\\Desktop\\A.csv")
    df.write
      .mode(SaveMode.Overwrite)
      .json("C:\\Users\\HR\\Desktop\\aaa.json")
    // Read the JSON file back
    spark.read
      .json("C:\\Users\\HR\\Desktop\\aaa.json")
      .show()
  }

  /** Converts each row of a DataFrame to a JSON string via toJSON. */
  @Test
  def json1(): Unit = {
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    val df = spark.read
      .option("header", value = true)
      .csv("C:\\Users\\HR\\Desktop\\A.csv")
    // Convert rows to JSON strings
    df.toJSON.show()
  }

  /** Round-trips a DataFrame through an RDD of JSON strings and back to
    * a DataFrame via spark.read.json. */
  @Test
  def json2(): Unit = {
    val spark = SparkSession.builder()
      .master("local[6]")
      .appName("Reader")
      .getOrCreate()
    val df = spark.read
      .option("header", value = true)
      .csv("C:\\Users\\HR\\Desktop\\A.csv")
    // Turn the JSON-string data back into a DataFrame
    val jsonRDD = df.toJSON.rdd
    spark.read.json(jsonRDD).show()
  }
}
