package com.guchenbo.spark.sql

import org.apache.spark.sql.{DataFrameReader, SparkSession}

/**
 * spark jdbc 并行读取
 *
 * @author guchenbo
 * @date 2021/6/25
 */
object JdbcReadParallel {

  /**
   * Entry point: builds a local SparkSession with Hive support and runs the
   * parallel JDBC read demo. The session is always stopped, even on failure.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("Spark Jdbc Read Parallel")
      .master("local[2]")
      .enableHiveSupport()
      .config("hive.metastore.uris", "thrift://ark150:9083")
      .config("hive.exec.scratchdir", "/tmp/hive")
      .getOrCreate()
    try {
      read(spark)
    } finally {
      spark.stop() // release local executors / Hive connections even if the read fails
    }
  }

  /**
   * Demonstrates a parallel JDBC read: first probes MIN/MAX of the partition
   * column with a single-row query, then issues a partitioned load where Spark
   * splits [lowerBound, upperBound] on that column across `numPartitions`
   * concurrent JDBC connections.
   *
   * NOTE(review): the JDBC URL, user and password are hard-coded below — move
   * them to configuration or a secret store before any real use.
   *
   * @param spark active session used for all JDBC reads
   */
  def read(spark: SparkSession): Unit = {
    println("读取外表")

    // Join that we want to read in parallel (the "table" from Spark's view).
    val sql =
      """
        |SELECT a.*,b.name as nameb
        |FROM model_manager.model_info a
        |         LEFT JOIN model_manager.model_form_template b ON a.form_template_uuid = b.uuid
        |""".stripMargin

    // A fresh DataFrameReader per call: reader options are mutable, so sharing
    // one instance between the bounds probe and the partitioned load would
    // leak options (e.g. "query") from one into the other.
    def makeReader: DataFrameReader = {
      spark.read
        .format("jdbc")
        .option("url", "jdbc:mysql://10.57.16.13:3306/model_manager_zhongyin?useSSL=false")
        .option("user", "model_paas_manager")
        .option("password", "Liu_0123456789") // FIXME: hard-coded credential
        // Legacy driver class name; Connector/J 8+ uses com.mysql.cj.jdbc.Driver.
        .option("driver", "com.mysql.jdbc.Driver")
    }

    val partitionColumn = "gmt_create"
    // Probe the bounds of the partition column so Spark can split the range.
    val maxSql = s"SELECT MAX(maxmin.$partitionColumn) as maxv, MIN(maxmin.$partitionColumn) as minv FROM( $sql ) maxmin"
    println(maxSql)

    // MAX/MIN over any input always yields exactly one row, so head() is safe.
    val bounds = makeReader
      .option("query", maxSql)
      .load()
      .select("maxv", "minv")
      .head()
    // NOTE(review): if the joined result is empty, both values are SQL NULL and
    // these become the string "null", which breaks the bounded load below —
    // guard upstream if an empty source is possible.
    val upperB = String.valueOf(bounds(0))
    val lowerB = String.valueOf(bounds(1))
    println(upperB)
    println(lowerB)

    // Partitioned read: 5 concurrent JDBC connections, each scanning one slice
    // of [lowerBound, upperBound] on partitionColumn.
    val df = makeReader
      .option("dbtable", s"($sql) sub")
      .option("numPartitions", 5)
      .option("partitionColumn", partitionColumn)
      .option("upperBound", upperB)
      .option("lowerBound", lowerB)
      .load()

    df.show()
    // count() runs distributed; the original collect().size pulled every row
    // to the driver just to measure its length.
    println(df.count())
  }
}
