package core_sql.day06_sql.source_jdbc

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * JDBC data source example: read a table from a relational database with Spark SQL.
  */
object JDBCDataSource {

  /**
    * Reads the `logs` table from a local MySQL database over JDBC, keeps rows
    * whose `age` is at most 13, adds 10 to each remaining age, and writes the
    * result to disk as Parquet.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("JdbcDataSource")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    // Ensure the SparkSession is released even if the job fails part-way.
    try {
      // Calling load() contacts the database immediately (before any action
      // runs) because Spark must fetch the table's schema to build the
      // DataFrame.
      val logs: DataFrame = spark.read.format("jdbc").options(
        Map(
          "url" -> "jdbc:mysql://localhost:3306/bigdata",
          // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J
          // 5.x class name; with Connector/J 8.x the driver class is
          // "com.mysql.cj.jdbc.Driver" — confirm which jar is on the classpath.
          "driver" -> "com.mysql.jdbc.Driver",
          "dbtable" -> "logs",
          "user" -> "root",
          "password" -> "123568")
      ).load()

      // Prefer a Column expression over a lambda filter: Catalyst can push
      // this predicate down to the database instead of deserializing rows.
      val young = logs.where($"age" <= 13)

      // "reslut" typo in the original local name fixed to "result".
      val result: DataFrame = young.select($"id", $"name", $"age" + 10 as "age")

      // Parquet preserves the DataFrame schema; plain text output would fail
      // here because text sinks require a single string column.
      result.write.parquet("/Users/zx/Desktop/parquet")
    } finally {
      spark.close()
    }
  }

}
