package Spark2JDBC

/**
  * Created by Administrator on 2018/5/28.
  */

import java.util.Properties
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Demo: read a MySQL table through the Spark JDBC data source, filter and
  * transform it with DataFrame column expressions, and write the result out
  * as parquet. Runs locally via `main`; takes no command-line arguments.
  */
object JDBCDataSource {
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("JDBCDataSource")
      .master("local[*]")
      .getOrCreate()

    // Required for the $"column" syntax used below.
    import spark.implicits._

    // Does load() pull the actual MySQL rows here? No — Spark is lazy — but it
    // does contact the database to read the table's schema (header) information.
    // Example URL with encoding options:
    // val url = "jdbc:mysql://localhost:3306/dbname?useUnicode=true&characterEncoding=utf8"
    // NOTE(review): credentials are hard-coded; move them to configuration for real use.
    // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x class name;
    // Connector/J 8+ uses "com.mysql.cj.jdbc.Driver" — confirm which jar is on the classpath.
    val logs: DataFrame = spark.read.format("jdbc").options(
      Map("url" -> "jdbc:mysql://localhost:3306/spark2",
        "driver" -> "com.mysql.jdbc.Driver",
        "dbtable" -> "logs",
        "user" -> "root",
        "password" -> "123")
    ).load()

    //logs.printSchema()

    //logs.show()

    // Row-lambda (typed) filter, kept for reference:
    //    val filtered: Dataset[Row] = logs.filter(r => {
    //      r.getAs[Int]("age") <= 13
    //    })
    //    filtered.show()

    // Column-expression filter.
    val r = logs.filter($"age" <= 13)

    // Equivalent form:
    //val r = logs.where($"age" <= 13)

    // Multiply age by 10 while keeping the output column name "age".
    val result: DataFrame = r.select($"id", $"name", $"age" * 10 as "age")

    // Writing the processed data back to the database, kept for reference:
    //val props = new Properties()
    //props.put("user","root")
    //props.put("password","123568")
    //result.write.mode("ignore").jdbc("jdbc:mysql://localhost:3306/bigdata", "logs1", props)

    // Saving a DataFrame as text fails unless there is exactly one column,
    // and that column must be String-typed:
    //result.write.text("D:\\SparkTestData\\text")

    //result.write.json("D:\\SparkTestData\\json")

    // CSV is comma-separated by default:
    //result.write.csv("D:\\SparkTestData\\csv")

    // Parquet is self-describing: the schema is stored with the data.
    // Default SaveMode is ErrorIfExists, so re-running fails if the path already exists.
    result.write.parquet("D:\\SparkTestData\\parquet")

    //result.show()

    spark.close()
  }
}
