package sql

import org.apache.spark.sql.{DataFrameReader, SparkSession}
import org.junit.Test

/**
 * Intro: demonstrates the basic Spark SQL entry points for reading and
 * writing data (SparkSession, DataFrameReader, DataFrameWriter).
 *
 * date: 2020/8/7 10:02
 *
 * @author Hesion
 * @since JDK 1.8
 */
class Intro {

  // Required on Windows so Hadoop can locate its native binaries (winutils).
  System.setProperty("hadoop.home.dir", "C:\\hadoop")

  /**
   * Shows where the read "framework" lives: `spark.read` returns the
   * [[DataFrameReader]], the single entry point for all batch reads.
   */
  @Test
  def reader1(): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[6]")
      .appName("reader1")
      .getOrCreate()
    try {
      // The DataFrameReader is the entry point of Spark SQL's read API.
      val reader: DataFrameReader = spark.read
    } finally {
      spark.stop() // release the local Spark context between tests
    }
  }

  /**
   * Demonstrates the two equivalent ways to read a CSV file:
   * the generic `format(...).load(...)` form and the `csv(...)` shortcut.
   */
  @Test
  def reader2(): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[6]")
      .appName("reader2")
      .getOrCreate()
    try {
      // Form 1: generic load() with an explicit format.
      spark.read
        .format("csv")
        .option("header", value = true)      // treat the first row as the header
        .option("inferSchema", value = true) // infer column types from the data
        .load("dataset/BeijingPM20100101_20151231.csv")
        .show(10)

      // Form 2: format-specific shortcut — csv() implies format("csv").
      spark.read
        .option("header", value = true)
        .option("inferSchema", value = true)
        .csv("dataset/BeijingPM20100101_20151231.csv")
        .show()
    } finally {
      spark.stop()
    }
  }

  /**
   * Demonstrates the write side: read a CSV dataset, then write it back
   * out as JSON via `df.write` (the DataFrameWriter).
   *
   * NOTE(review): the default SaveMode is ErrorIfExists, so this fails if
   * `output/beijing_pm` already exists — use `.mode(...)` to override.
   */
  @Test
  def write(): Unit = {
    // 1. Create the SparkSession (was mislabeled appName "reader2" — copy-paste bug).
    val spark: SparkSession = SparkSession.builder()
      .master("local[6]")
      .appName("write")
      .getOrCreate()
    try {
      // 2. Read the source dataset.
      val df = spark.read
        .option("header", value = true)
        .csv("dataset/BeijingPM20100101_20151231.csv")

      // 3. Write it out as JSON.
      df.write.json("output/beijing_pm")
    } finally {
      spark.stop()
    }
  }
}
