package org.zjt.spark.sql

import java.io.File
import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession



/**
  *   Reads and processes a MySQL table over JDBC,
  *   and saves/reads the table's metadata and rows as Parquet files.
  */
object JDBCSparkSQL {

  /**
    * Entry point. `extends App` is avoided deliberately: it relies on
    * DelayedInit and has initialization-order pitfalls; an explicit
    * main() is the recommended form for non-trivial programs.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("JDBCSparkSQL").setMaster("local")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    // JDBC connection settings.
    // NOTE(review): credentials are hard-coded for demo purposes only;
    // move them to configuration or environment variables in real code.
    val url: String = "jdbc:mysql://localhost/test?" +
      "useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"
    val table = "user"
    // NOTE(review): com.mysql.jdbc.Driver is the legacy class name;
    // MySQL Connector/J 8.x renamed it to com.mysql.cj.jdbc.Driver.
    val driver = "com.mysql.jdbc.Driver"

    // 1. Read the MySQL table through the generic "jdbc" data source,
    //    then filter / sort / aggregate with the DataFrame API.
    val jdbcDF = spark.read
      .format("jdbc")
      .option("driver", driver)
      .option("url", url)
      .option("dbtable", table)
      .option("user", "root")
      .option("password", "12345")
      .load()
      .where("id = 1 ").orderBy("id")
      .groupBy("name").agg("id" -> "sum")

    jdbcDF.show()

    // 2. Read the same table through the read.jdbc(...) convenience API,
    //    passing credentials via java.util.Properties.
    //    (.toDF() on an existing DataFrame is a no-op and was removed.)
    val connectionProperties = new Properties()
    connectionProperties.put("user", "root")
    connectionProperties.put("password", "12345")
    connectionProperties.put("driver", driver)
    val jdbcDF2 = spark.read.jdbc(url, table, connectionProperties)
    println(jdbcDF2.collect().mkString(","))

    // 3. Round-trip the rows through Parquet, partitioned by "id"
    //    (the partition column is encoded in the directory layout).
    //    Parquet is well suited to persisting Spark SQL tables.
    val parquetPath = new File("people.parquet").getCanonicalPath
    jdbcDF2.write.partitionBy("id").parquet(parquetPath)
    val parquetDF = spark.read.parquet(parquetPath)
    println(parquetDF.collect().mkString(","))

    // 4. Write the aggregated frame back to MySQL (table "user2")
    //    via the generic "jdbc" writer ...
    jdbcDF.write
      .format("jdbc")
      .option("url", url)
      .option("driver", driver)
      .option("dbtable", "user2")
      .option("user", "root")
      .option("password", "12345")
      .save()

    // 5. ... and via the write.jdbc(...) convenience API (table "user3").
    jdbcDF2.write.jdbc(url, "user3", connectionProperties)

    spark.stop()
  }
}
