package com.spark.util.example

import java.util.Properties
import com.spark.util.core.{Logging, Sparking}
import org.apache.spark.sql.SaveMode

object DatasetExample extends Sparking with Logging {

  /** Runnable example showing three Spark SQL data-source operations:
    *  1. writing a small DataFrame to MySQL over JDBC,
    *  2. reading a table back from MySQL via the generic JDBC source,
    *  3. loading a local CSV file with header/delimiter/null options.
    */
  def main(args: Array[String]): Unit = {

    val spark = getSparkSession(None)
    import spark.sqlContext.implicits._

    // Connection properties for the JDBC write below.
    val props = new Properties()
    props.put("driver", "com.mysql.jdbc.Driver")
    props.put("user", "root")
    props.put("password", "123456")

    // Build a tiny DataFrame of (id, name) pairs for ids 0..9.
    val df = spark.sparkContext
      .makeRDD(List.range(0, 10))
      .map(x => (x, "name"))
      .toDF("id", "name")

    // Write to MySQL. The explicit jdbc(...) overload is used because
    // .save() has a known issue in this setup (per original author's note).
    df.write
      .mode(SaveMode.Append)
      .jdbc("jdbc:mysql://localhost:3306/test?characterEncoding=UTF-8", "a", props)

    // Read from MySQL. By default the read uses a single partition (parallelism 1).
    // To read in parallel, add one of the following to the options:
    //   1. partitionColumn / lowerBound / upperBound / numPartitions —
    //      the partition key must be an integral column;
    //   2. predicates: Array[String] — partitions on any column type,
    //      which suits most scenarios and is the recommended approach.
    // Caution: setting the partition count too high means many partitions hit
    // the database simultaneously, which can bring it down.
    val params = Map[String, String](
      "url" -> "jdbc:mysql://localhost:3306/test?characterEncoding=UTF-8",
      "driver" -> "com.mysql.jdbc.Driver",
      "dbtable" -> "a",
      "user" -> "root",
      "password" -> "123456")
    // FIX: the original called .jdbc() with no arguments (no such overload —
    // does not compile) and left `params` unused. Route the options map
    // through the generic "jdbc" data source instead.
    spark.sqlContext
      .read
      .format("jdbc")
      .options(params)
      .load()

    // Load a CSV: comma-delimited, first row is a header, "\N" marks nulls.
    spark.read.format("csv")
      .option("delimiter", ",")
      .option("header", "true")
      .option("nullValue", "\\N")
      .load("C:\\Users\\27255\\Desktop\\VEHICLENUMBER_201912241337.csv")
  }
}
