package SparkSQL

import org.apache.spark.sql.{DataFrame, DataFrameReader, SparkSession}
import org.apache.spark.sql.types.{FloatType, IntegerType, StringType, StructField, StructType}
import org.junit.Test

/**
 * Demonstrates several ways of creating a DataFrame:
 * reading CSV (two reader styles), building from a local Seq,
 * and loading a table over JDBC.
 */
class DataFrameTest {
  // Shared SparkSession for all tests; runs locally on all available cores.
  val spark: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName("createDataFrame")
    .getOrCreate()

  /** Reads the same CSV file through two equivalent reader APIs and shows both. */
  @Test
  def createDataFrame01(): Unit = {
    // First form: generic reader with an explicit format string.
    spark.read
      .format("csv")
      .option("header", true)      // first row holds column names
      .option("inferSchema", true) // sample the data to infer each column's type
      .load("data/BeijingPM20100101_20151231.csv")
      .show()

    // Second form: the csv(...) convenience method.
    spark.read
      .option("header", true)
      .option("inferSchema", true) // sample the data to infer each column's type
      .csv("data/BeijingPM20100101_20151231.csv")
      .show()
  }

  /** Builds a DataFrame from an in-memory Seq of tuples via toDF. */
  @Test
  def createDataFrame02(): Unit = {
    // Implicit conversions providing toDF on local collections.
    import spark.implicits._
    Seq(
      (1, "First Value", java.sql.Date.valueOf("2010-01-01")),
      (2, "Second Value", java.sql.Date.valueOf("2010-02-01"))
    ).toDF("int_column", "string_column", "date_column").show()
  }

  /** Loads the `student` table from a local MySQL instance over JDBC and shows it. */
  @Test
  def createDataFrame03(): Unit = {
    // NOTE(review): credentials are hard-coded for this demo; in real code
    // they should come from configuration or a secrets store.
    spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://127.0.0.1:3306/spark")
      .option("dbtable", "student")
      .option("user", "root")
      .option("password", "123456")
      .load()
      .show()
  }
}
