package com.oreilly.learningsparkexamples.scala

import java.sql.{DriverManager, ResultSet}

import org.apache.spark.SparkContext
import org.apache.spark.rdd.JdbcRDD

/**
  * Example: load rows from a relational database (MySQL) into Spark
  * using [[org.apache.spark.rdd.JdbcRDD]].
  *
  * Usage: LoadSimpleJdbc [sparkmaster]
  */
object LoadSimpleJdbc{

  def main(args: Array[String]): Unit = {
    if (args.length < 1) {
      println("Usage: [sparkmaster]")
      System.exit(1)
    }

    val master = args(0)
    val sc = new SparkContext(master, "LoadSimpleJdbc")
    try {
      // JdbcRDD splits the [lowerBound, upperBound] key range into
      // numPartitions sub-ranges and binds each sub-range into the two
      // '?' placeholders of the query, one query per partition.
      val data = new JdbcRDD(sc,
        createConnection,
        "SELECT * FROM panda WHERE ? <= id AND id <= ?",
        lowerBound = 1, upperBound = 3,
        numPartitions = 2,
        mapRow = extractValues
      )
      println(data.collect().toList)
    } finally {
      // Always release cluster resources, even if the job above fails.
      sc.stop()
    }
  }

  /**
    * Opens a new JDBC connection to the MySQL `test` database.
    *
    * Invoked by JdbcRDD on the executors (once per partition); JdbcRDD
    * closes the connection itself when the partition is consumed.
    */
  def createConnection() = {
    // Loading the driver class registers it with DriverManager via its
    // static initializer; the deprecated newInstance() call the original
    // made is unnecessary with JDBC 4+ drivers and its result was discarded.
    Class.forName("com.mysql.jdbc.Driver")
    DriverManager.getConnection("jdbc:mysql://192.168.12.33/test?user=root&password=123456")
  }

  /**
    * Maps one result-set row to a tuple of its first two columns
    * (column 1 as Int, column 2 as String).
    */
  def extractValues(r: ResultSet) = {
    (r.getInt(1), r.getString(2))
  }

}