package com.peng.sparktest.sparksql

import java.util.Properties

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object SparkSql03_JDBC {

  /** Demonstrates reading two MySQL tables over JDBC, joining them via Spark SQL,
    * controlling the shuffle/output partition count, and writing the joined
    * result back to MySQL.
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("spark").setMaster("local")
      .set("spark.sql.shuffle.partitions", "1") // tune the shuffle partition count (defaults to 200)
    val session: SparkSession = SparkSession
      .builder()
      .config(conf)
      //      .master("local")
      //      .appName("spark")
      //      .enableHiveSupport() // when enabled, metadata can be read from Hive; when disabled, only data mapped by spark-sql's catalog (SELECT statements) can be queried — DDL-style SQL is not supported
      .getOrCreate()

    // Ensure the SparkSession is stopped even if the job fails, otherwise the
    // local Spark context and its resources are leaked.
    try {
      val context: SparkContext = session.sparkContext
      context.setLogLevel("ERROR")

      // JDBC connection settings: "user"/"password"/"driver" are read by the
      // JDBC data source from the Properties; the url is passed explicitly.
      val pro = new Properties()
      pro.put("url", "jdbc:mysql://127.0.0.1/spring_boot")
      pro.put("user", "root")
      pro.put("password", "198608")
      // NOTE(review): com.mysql.jdbc.Driver is the legacy class name, deprecated
      // in MySQL Connector/J 8+ — use com.mysql.cj.jdbc.Driver there.
      pro.put("driver", "com.mysql.jdbc.Driver")

      val employee: DataFrame = session.read.jdbc(pro.getProperty("url"), "employee", pro)
      val dept: DataFrame = session.read.jdbc(pro.getProperty("url"), "dept", pro)
      employee.createTempView("employee")
      dept.createTempView("dept")

      // The join will most likely trigger a shuffle; shuffle parallelism
      // (partition count) defaults to 200 and is tuned via conf above.
      val selectRes: DataFrame = session.sql("select e.name,d.name as role,d.salary from employee e left join dept d on e.deptid=d.id")

      println(s"分区数：${selectRes.rdd.partitions.length}")
      // Collapse to a single partition so a single output task/file is produced.
      val dataset: Dataset[Row] = selectRes.coalesce(1)
      println(s"分区数：${dataset.rdd.partitions.length}")

      //    selectRes.show()
      //    selectRes.printSchema()
      dataset.show()

      // Write the joined result back to MySQL. Default SaveMode is
      // ErrorIfExists, so this fails if table "join_info3" already exists.
      selectRes.write.jdbc(pro.getProperty("url"), "join_info3", pro)
    } finally {
      session.stop()
    }
  }

}
