package com.dtkavin.sparkSQL

import java.util.Properties

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by IntelliJ IDEA.
  * Programmer : John Zn
  * Date : 2016/4/23 0023
  * Time : 00:14
  * Description : Demonstrates reading from and writing to a MySQL database
  *               with Spark SQL DataFrames over JDBC.
  */
// Empty companion class; all functionality lives in the JDBCSparkSql object below.
// NOTE(review): this class is never instantiated in this file — confirm it is
// needed before removing.
class JDBCSparkSql {

}

object JDBCSparkSql {

  // Connection settings shared by the read (imports) and write (exports) paths.
  // Previously duplicated as string literals in both methods.
  // NOTE(review): credentials are hard-coded; consider externalizing to config.
  private val JdbcUrl = "jdbc:mysql://mysql01:3306/hivedb"
  private val JdbcDriver = "com.mysql.jdbc.Driver"
  private val DbTable = "t_person2"
  private val DbUser = "root"
  private val DbPassword = "123456"

  /**
    * Reads the `t_person2` table from MySQL into a DataFrame over JDBC
    * and prints its contents to stdout.
    */
  def imports(): Unit = {
    val conf = new SparkConf()
    val sc = new SparkContext("local[3]", "imports", conf)
    try {
      val ssc = new SQLContext(sc)
      val jdbcDF = ssc.read
        .format("jdbc")
        .options(Map(
          "url" -> JdbcUrl,
          "driver" -> JdbcDriver,
          "dbtable" -> DbTable,
          "user" -> DbUser,
          "password" -> DbPassword))
        .load()
      jdbcDF.show()
    } finally {
      // Always release the SparkContext, even if the JDBC read fails;
      // the original leaked the context on exception.
      sc.stop()
    }
  }

  /**
    * Builds a small in-memory DataFrame and appends it to the MySQL table
    * over JDBC. Writes directly to MySQL — nothing lands in Hive/HDFS.
    */
  def exports(): Unit = {
    val conf = new SparkConf()
    val sc = new SparkContext("local[3]", "exports", conf)
    //    sc.addJar("G:\\BigData\\lessions\\spark\\spark-day7\\lecture\\mysql-connector-java-5.1.35-bin.jar")
    try {
      val sqlc = new SQLContext(sc)

      val rdd1 = sc.parallelize(List((1, "aaa", 34), (3, "bbb", 78), (7, "ccc", 22), (4, "ddd", 11)))
      //    println(rdd1.collect().toBuffer)
      // Tuple elements are already Int; the original's `.toInt` calls were redundant.
      val personRdd = rdd1.map { case (id, name, age) => Person(id, name, age) }
      import sqlc.implicits._
      val personDF = personRdd.toDF

      // Append to MySQL over JDBC.
      val prop = new Properties()
      prop.put("user", DbUser)
      prop.put("password", DbPassword)
      personDF.write.mode("append").jdbc(JdbcUrl, DbTable, prop)
    } finally {
      // Guarantee context shutdown even when the JDBC write throws.
      sc.stop()
    }
  }

  /** Entry point: currently runs only the JDBC read demo. */
  def main(args: Array[String]): Unit = {
    //    exports()

    println("*" * 30)

    imports()
  }
}

