package cn.itcast.spark.sql

import org.apache.spark.sql.{SaveMode, SparkSession}
import org.junit.Test

/**
 * Demonstrates the SparkSQL reader/writer API: format-generic `load`/`save`,
 * format-specific shortcuts (`csv`, `json`, `parquet`), save modes, and
 * partition discovery. Each `@Test` method is an independent runnable example.
 */
class ReadWrite {
  // Point Hadoop at winutils.exe so Spark file IO works on Windows.
  System.setProperty("hadoop.home.dir", "C:\\winutils")

  // One shared session for every test. `getOrCreate` is idempotent, so the
  // per-method builders the tests used to carry were redundant — they all
  // resolved to this same session.
  val spark: SparkSession = SparkSession.builder()
    .appName("reader")
    .master("local[6]")
    .getOrCreate()

  /**
   * Entry point of the read pipeline: `spark.read` hands back a
   * DataFrameReader on which format/options/path are then configured.
   */
  @Test
  def reader1(): Unit = {
    val reader = spark.read
    // Show that a reader was obtained; configuration happens in reader2.
    println(reader.getClass.getName)
  }

  /**
   * Two equivalent ways to read a CSV file:
   * generic `format("csv").load(path)` vs the `csv(path)` shortcut.
   */
  @Test
  def reader2(): Unit = {
    spark.read
      .format("csv")
      .option("header", value = true)
      .option("inferSchema", value = true)
      .load("dataset/BeijingPM20100101_20151231.csv")
      .show(10)

    spark.read
      .option("header", value = true)
      .option("inferSchema", value = true)
      .csv("dataset/BeijingPM20100101_20151231.csv")
      .show(10)
  }

  /**
   * Two equivalent ways to write JSON: the `json(path)` shortcut vs the
   * generic `format("json").save(path)`. `SaveMode.Overwrite` keeps the test
   * re-runnable (the default ErrorIfExists fails once the directory exists),
   * matching the convention already used in `parquet()`.
   */
  @Test
  def writer1(): Unit = {
    // 1. Read the source dataset
    val df = spark.read.option("header", value = true).csv("dataset/BeijingPM20100101_20151231.csv")

    // 2. Write it out as JSON, twice, via the two API styles
    df.write.mode(SaveMode.Overwrite).json("dataset/beijing_pm.json")

    df.write.format("json").mode(SaveMode.Overwrite).save("dataset/beijing_pm2.json")
  }

  /**
   * Round-trips the dataset through parquet. Note `load` needs no explicit
   * format on read-back: parquet is Spark's default data source.
   */
  @Test
  def parquet(): Unit = {
    val df = spark.read.option("header", value = true).csv("dataset/BeijingPM20100101_20151231.csv")
    df.write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .save("dataset/beijing_pm3.parquet")

    spark.read
      .load("dataset/beijing_pm3.parquet")
      .show()
  }

  /**
   * Table partitioning is not parquet-specific — other file formats can be
   * partitioned the same way via `partitionBy`.
   */
  @Test
  def parquetPartitions(): Unit = {
    // 1. Read the source data (one-time setup, kept for reference)
//    val df = spark.read.option("header", value = true).csv("dataset/BeijingPM20100101_20151231.csv")

    // 2. Write with table partitioning: one directory per (year, month)
//    df.write
//      .partitionBy("year", "month")
//      .save("dataset/beijing_pm4")

    // 3. Reading a single partition directory: the partition columns
    //    (year, month) are absent from the schema
//    spark.read
//      .parquet("dataset/beijing_pm4/year=2010/month=1")
//      .printSchema()

    // Reading the root directory: Spark discovers the partitions
    // automatically and adds year/month back as columns.
    spark.read
      .parquet("dataset/beijing_pm4")
      .printSchema()
  }

  /**
   * Reads a JSON dataset previously produced by the commented-out
   * one-time setup below.
   */
  @Test
  def json(): Unit = {
    // One-time setup: generate the JSON file from the CSV source
//    val df = spark.read.option("header", value = true).csv("dataset/BeijingPM20100101_20151231.csv")
//
//    df.write.json("dataset/beijing_pm5.json")

    spark.read
      .json("dataset/beijing_pm5.json")
      .show()
  }

  /**
   * `toJSON` use case: serialize each row of a DataFrame to a JSON string
   * (e.g. before pushing records to a message queue).
   */
  @Test
  def json1(): Unit = {
    val df = spark.read.option("header", value = true).csv("dataset/BeijingPM20100101_20151231.csv")
    df.toJSON.show()
  }

  /**
   * The reverse direction: JSON strings pulled from a message queue arrive
   * as an RDD[String], which `spark.read.json` can parse into a DataFrame.
   */
  @Test
  def json2(): Unit = {
    val df = spark.read.option("header", value = true).csv("dataset/BeijingPM20100101_20151231.csv")
    // Simulate the queue payload: an RDD of JSON strings
    val jsonRDD = df.toJSON.rdd
    spark.read.json(jsonRDD).show()
  }

}







































