package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Demo: writing a DataFrame out through the Spark JDBC data source (MySQL)
 * and as dynamically partitioned CSV files.
 *
 * Reads `spark/data/students.txt` as CSV with an explicit schema, then:
 *   1. appends the rows into the existing MySQL table `test.student`;
 *   2. overwrites `test2.student`, letting Spark create the table with
 *      explicit column types via `createTableColumnTypes`;
 *   3. writes the data as CSV partitioned by `clazz` and `gender`.
 */
object Demo9WriteJdbc {

  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local")
      // Demo runs locally on a small file: one shuffle partition avoids
      // producing many tiny output files/tasks.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    try {
      // Explicit schema: header-less CSV, comma-separated.
      val studentDF: DataFrame = spark.read
        .option("sep", ",")
        .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
        .csv("spark/data/students.txt")

      // 1) Append into an existing table. The `driver` option is set
      // explicitly so the run does not depend on driver auto-discovery;
      // `useUnicode=true` is required by MySQL Connector/J for
      // `characterEncoding` to take effect.
      studentDF.write
        .format("jdbc")
        .option("url", "jdbc:mysql://master:3306")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("dbtable", "test.student")
        .option("useUnicode", "true")
        .option("characterEncoding", "utf-8")
        .option("user", "root")
        .option("password", "123456")
        .mode(SaveMode.Append)
        .save()

      // 2) Overwrite: Spark drops and recreates the table, using the
      // column types supplied below instead of its defaults (e.g. TEXT).
      studentDF.write
        .format("jdbc")
        .option("url", "jdbc:mysql://master:3306")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("dbtable", "test2.student")
        .option("useUnicode", "true")
        .option("characterEncoding", "utf-8")
        .option("user", "root")
        .option("password", "123456")
        .mode(SaveMode.Overwrite)
        .option("createTableColumnTypes", "id VARCHAR(255),name VARCHAR(255),age INT,gender VARCHAR(255),clazz VARCHAR(255)")
        .save()

      // 3) Dynamic partitioning: one output directory per distinct
      // (clazz, gender) combination; the partition columns are encoded in
      // the directory names and dropped from the data files.
      studentDF
        .write
        .option("sep", ",")
        .mode(SaveMode.Overwrite)
        .partitionBy("clazz", "gender")
        .csv("spark/data/partitonBy")
    } finally {
      // Release the local Spark context even if a write fails.
      spark.stop()
    }
  }

}
