package mysql

import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.spark.rdd.{JdbcRDD, RDD}
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.{After, Before, Test}

import scala.util.parsing.json.JSON

/**
 * JUnit-driven examples of reading from and writing to MySQL with Spark.
 *
 * Original note (translated): every dependency that is not a narrow
 * dependency is a wide dependency.
 */
class Driver {

	// Local mode with three worker threads; app name kept as in the original.
	val conf: SparkConf = new SparkConf().setAppName("Serializ").setMaster("local[3]")
	var sc: SparkContext = null
	var outpath: String = "out"

	import util.MyPredef._

	/** Creates the SparkContext and deletes the output directory before each test. */
	@Before
	def init() {
		sc = new SparkContext(conf)
		outpath.delete()
	}

	/** Stops the SparkContext after each test so local resources are released. */
	@After
	def after() {
		sc.stop()
	}

	/**
	 * Reads rows from MySQL through a [[JdbcRDD]] and prints each partition.
	 *
	 * The SQL must contain exactly two '?' placeholders: JdbcRDD fills them
	 * with the per-partition lower/upper id bounds derived from the
	 * (lowerBound = 40, upperBound = 45, numPartitions = 2) arguments.
	 * JdbcRDD closes the connections it obtains from the factory itself.
	 */
	@Test
	def query(): Unit = {

		val driver = "com.mysql.jdbc.Driver"
		val url = "jdbc:mysql://localhost:3306/db3"
		val userName = "root"
		val passwd = "123456"

		val sql: String = "select username, id from db3.user where id >= ? and id <= ?"

		val jdbc: JdbcRDD[String] = new JdbcRDD(
			sc,
			() => {
				// Connection factory: invoked once per partition on the executor.
				Class.forName(driver)
				DriverManager.getConnection(url, userName, passwd)
			},
			sql,
			40, // lower/upper bounds are used to split the id range into partitions
			45,
			2,
			rs => (rs.getString(1) + ", " + rs.getInt(2))
		)
		// NOTE: println runs on the executors; with local[3] it appears in this JVM's stdout.
		jdbc.foreachPartition(x => println(x.toArray.mkString(" | ")))
		//		老王, 41 | 小二王, 42
		//		小二王, 43 | 传智播客, 45

	}

	/**
	 * Writes an RDD of (username, id) pairs into MySQL.
	 *
	 * foreachPartition is an action that runs once per partition (analogous to
	 * the transformation mapPartitions), so each partition opens exactly one
	 * connection. Fixes over the original version:
	 *   - a single PreparedStatement is reused for the whole partition instead
	 *     of creating a new, never-closed statement per row (resource leak);
	 *   - rows are accumulated with addBatch() and flushed via executeBatch();
	 *   - statement and connection are closed in finally blocks so they are
	 *     released even when an insert fails.
	 */
	@Test
	def save(): Unit = {

		val data: RDD[(String, Int)] = sc.parallelize(List(("zhangsan", 100), ("lisi", 101), ("wangwu", 102)))

		data.foreachPartition(datas => {
			val driver = "com.mysql.jdbc.Driver"
			Class.forName(driver)
			val url = "jdbc:mysql://localhost:3306/db3"
			val userName = "root"
			val passwd = "123456"

			val conn: Connection = DriverManager.getConnection(url, userName, passwd)
			try {
				val statement: PreparedStatement = conn.prepareStatement("insert into db3.user (username, id) values (?, ?)")
				try {
					datas.foreach {
						case (username, id) =>
							statement.setString(1, username)
							statement.setInt(2, id)
							statement.addBatch()
					}
					// One round trip for the whole partition instead of one per row.
					statement.executeBatch()
				} finally {
					statement.close()
				}
			} finally {
				conn.close()
			}
		})
	}

}
