package dataframe

import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo of the Spark `DataFrameReader` API: creating DataFrames from text,
 * CSV (with and without an explicit schema), JSON, Parquet, and a JDBC source.
 *
 * Each format is exercised in its own helper so the steps can be read (and
 * commented out) independently; every helper prints the resulting schema and
 * a sample of rows to stdout.
 */
object DataFrame_ReaderTest {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("DataFrame_ReaderTest")

    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()

    readText(spark)
    readCsv(spark)
    readCsvWithSchema(spark)
    readJson(spark)
    readParquet(spark)
    readJdbc(spark)

    spark.stop()
  }

  /** Creates a DataFrame from a plain-text file (one `value` column per line). */
  private def readText(spark: SparkSession): Unit = {
    // Generic format/load API.
    val txtDF: DataFrame = spark
      .read
      .format("text")
      .load("data/word.txt")
    txtDF.printSchema()
    txtDF.show()

    // Equivalent format-specific shorthand.
    val txtDF1 = spark.read.text("data/word.txt")
    txtDF1.show()
  }

  /** Creates a DataFrame from a CSV file, letting Spark infer the schema. */
  private def readCsv(spark: SparkSession): Unit = {
    // Generic format/load API. samplingRatio limits how much data the
    // schema-inference pass scans.
    val employeeDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .option("inferSchema", true)
      .option("header", true)
      .option("samplingRatio", 0.1)
      .load("data/employee.csv")
    employeeDF.printSchema()
    employeeDF.show()

    // Equivalent format-specific shorthand with the same options.
    val employeeDF1: DataFrame = spark
      .read
      .option("sep", ",")
      .option("inferSchema", true)
      .option("samplingRatio", 0.1)
      .option("header", true)
      .csv("data/employee.csv")
    employeeDF1.printSchema()
    employeeDF1.show()
  }

  /**
   * Creates a DataFrame from a CSV file with an explicit schema.
   * For large data sources, supplying a schema is more efficient than
   * having Spark infer one (it avoids an extra scan of the data).
   */
  private def readCsvWithSchema(spark: SparkSession): Unit = {
    val schema: StructType = StructType(Array(
      StructField("ID", StringType, nullable = false),
      StructField("movieTitle", StringType, nullable = false),
      StructField("movieGenres", StringType, nullable = false)
    ))
    val movieDF: DataFrame = spark
      .read
      .option("header", true)
      .option("sep", ",")
      .schema(schema)
      .csv("data/movies.csv")
    movieDF.printSchema()
    movieDF.show()
  }

  /** Creates a DataFrame from a JSON file, with and without an explicit schema. */
  private def readJson(spark: SparkSession): Unit = {
    // Let Spark infer the schema from the JSON documents.
    val peopleDF: DataFrame = spark
      .read
      .json("data/people.json")
    peopleDF.printSchema()
    peopleDF.show()

    // An explicit schema can also be supplied, overriding Spark's inference.
    val jsonSchema: StructType = StructType(Array(
      StructField("name", StringType, nullable = false),
      StructField("age", IntegerType, nullable = true)
    ))
    val peopleDFWithSchema = spark.read.schema(jsonSchema).json("data/people.json")
    peopleDFWithSchema.printSchema()
    peopleDFWithSchema.show()
  }

  /** Creates a DataFrame from a Parquet file (schema is embedded in the file). */
  private def readParquet(spark: SparkSession): Unit = {
    val userDF: DataFrame = spark.read.parquet("data/users.parquet")
    userDF.printSchema()
    userDF.show()
  }

  /** Creates a DataFrame from a MySQL table over JDBC. */
  private def readJdbc(spark: SparkSession): Unit = {
    val dbUrl = "jdbc:mysql://localhost:3306/db?useSSL=false&useUnicode=true&characterEncoding=utf8"
    val tableName = "db_test"
    // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x class;
    // Connector/J 8+ uses "com.mysql.cj.jdbc.Driver" — confirm which driver jar
    // is on the classpath before changing this.
    val driverClassName = "com.mysql.jdbc.Driver"
    val properties = new Properties()
    properties.setProperty("driver", driverClassName)
    // NOTE(review): credentials are blank placeholders; this will fail against
    // a secured MySQL instance. Prefer injecting them via config, not source.
    properties.setProperty("user", "")
    properties.setProperty("password", "")

    val jdbcDF: DataFrame = spark.read.jdbc(dbUrl, tableName, properties)
    jdbcDF.printSchema()
    jdbcDF.show()
  }

}
