package com.dtkavin.spark.rdd

import java.sql.{PreparedStatement, Connection, DriverManager}

import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.rdd.JdbcRDD


/**
  * Created by IntelliJ IDEA.
  * Programmer : John Zn
  * Date : 2016/4/14 0014
  * Time : 11:32
  * Description : 通过JDBC RDD ，直接对数据库进行读写操作
  * 注意：读写是要考虑如何读写，是否要partition的读写
  * !!!!注意:一个小bug，JDBCRDD中的sql语句中，id必须是>=或<=，否则，少一个=，就少一个值
  */
class JDBCRDD_Demo extends Serializable {

  /**
    * Opens a new MySQL connection.
    *
    * Passed by name into [[JdbcRDD]], so it runs inside each task — which is
    * why this class must be `Serializable`.
    *
    * @return a fresh JDBC [[Connection]] to the `db_bigdata_rule` database
    */
  def getConnection() = {
    Class.forName("com.mysql.jdbc.Driver")
    DriverManager.getConnection("jdbc:mysql://192.168.117.45/db_bigdata_rule?useUnicode=true&characterEncoding=UTF-8", "root", "123456")
  }

  /**
    * Inserts one hard-coded row into the `user` table via plain JDBC
    * (no Spark involved). Uses a PreparedStatement, so the values are
    * bound as parameters rather than concatenated into the SQL.
    */
  def write2Mysql(): Unit = {
    var conn: Connection = null
    var pstmt: PreparedStatement = null
    val sql = "insert into user (name, age) VALUES (?, ?)"
    try {
      conn = getConnection()
      pstmt = conn.prepareStatement(sql)
      pstmt.setString(1, "亚洲")
      pstmt.setInt(2, 25)
      pstmt.executeUpdate()
    } catch {
      case e: Exception => e.printStackTrace()
    } finally {
      // Null-guard both close() calls: if getConnection() throws, conn and
      // pstmt are still null and an unguarded close() would raise an NPE
      // from the finally block, masking the original failure.
      // (Also removed the duplicate conn.close() that previously ran again
      // after the finally block — a second close on an already-closed or
      // null connection.)
      if (pstmt != null) pstmt.close()
      if (conn != null) conn.close()
    }
    println("write done")
  }

  /**
    * Reads rows from the `user` table through Spark's [[JdbcRDD]] and
    * prints the partition count and the collected tuples
    * `(id, name, age)` on the driver.
    *
    * 注意: sql语句中，id必须是>=或<=，否则，少一个=，就少一个值 —
    * JdbcRDD substitutes the lower/upper bounds (1 and 9 here) into the
    * two `?` placeholders, and both bounds are inclusive.
    */
  def readFromMysql(): Unit = {
    val conf = new SparkConf()
    val sc = new SparkContext("local[3]", "JDBCRDD_READ", conf)
    try {
      // This statement runs inside tasks, hence the Serializable class.
      val sql = "select * from user where id >= ? and id <= ?"
      // Bounds 1..9 split across 2 partitions; each row is mapped to a tuple.
      val jdbcRDD = new JdbcRDD(sc, getConnection, sql, 1, 9, 2, rs => {
        (rs.getInt(1), rs.getString(2), rs.getInt(3))
      })
      println(jdbcRDD.partitions.length)
      println(jdbcRDD.collect.toBuffer)
    } finally {
      // Always release the SparkContext; the original leaked it.
      sc.stop()
    }
  }
}

object JDBCRDD_Demo {
  /**
    * Entry point: demonstrates the JDBC write (disabled by default) and
    * the JdbcRDD read against the demo MySQL database.
    *
    * @param args unused command-line arguments
    */
  // Explicit `: Unit =` — procedure syntax `def main(...) { }` is
  // deprecated in Scala 2.13 and removed in Scala 3.
  def main(args: Array[String]): Unit = {
    val jdbcdemo = new JDBCRDD_Demo

    /*
    * write by jdbc
    * */
//        jdbcdemo.write2Mysql()

    /*
    * read by jdbcRDD
    * */
    jdbcdemo.readFromMysql()
  }

}
