package com.gy.spark.sparksql

import java.util.Properties

import org.apache.spark.sql.api.java.UDF2
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import scala.collection.mutable

object DataFromMysql {

  /** Demo entry point: reads the `sc` table from MySQL over JDBC into a
    * DataFrame, runs a Spark SQL query against it, and writes the result
    * back to the same table (overwrite mode).
    *
    * NOTE(review): credentials and the JDBC URL are hard-coded; in real code
    * these should come from configuration, not source.
    */
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder()
      .master("local")
      .appName(this.getClass.getSimpleName)
      .getOrCreate()

    try {
      // Immutable JDBC option map — DataFrameReader.options accepts any
      // scala.collection.Map[String, String], so no mutable HashMap is needed.
      val jdbcOptions = Map(
        "url"      -> "jdbc:mysql://mainframe59:7306/db1",
        "driver"   -> "com.mysql.jdbc.Driver",
        "user"     -> "root",
        "password" -> "123456",
        "dbtable"  -> "sc"
      )

      val df: DataFrame = session.read.format("jdbc").options(jdbcOptions).load()

      // A JDBC read without partitioning options yields a single partition;
      // after a shuffle Spark defaults to 200 partitions (spark.sql.shuffle.partitions).

      // createOrReplaceTempView is idempotent, so re-running in the same
      // session cannot fail with "view already exists".
      df.createOrReplaceTempView("t1")
      val result: DataFrame = session.sql("select * from t1 limit 100")
      result.show()

      // JDBC writer takes credentials via java.util.Properties.
      val properties = new Properties()
      properties.setProperty("user", "root")
      properties.setProperty("password", "123456")
      result.write.mode(SaveMode.Overwrite)
        .jdbc(
          jdbcOptions("url"),
          jdbcOptions("dbtable"),
          properties
        )
    } finally {
      // Always release the local Spark context, even if the job fails.
      session.stop()
    }
  }
}
