package chapter04

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.col

import java.util.Properties

object test {
  /**
   * Demo entry point: reads the `user` table from MySQL via JDBC, shows it,
   * reads a headered CSV with inferred schema, and appends the CSV rows to
   * the MySQL `test` table.
   */
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("student")
      .getOrCreate()

    try {
      // Read the `user` table from MySQL.
      val df = spark.read.format("jdbc")
        .option("url", "jdbc:mysql://hadoop01:3306/student?useUnicode=true&characterEncoding=utf8&useSSL=false")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("user", "root")
        .option("password", "123456")
        .option("dbtable", "user")
        .load()
      df.show()

      import spark.implicits._
      val sc = spark.sparkContext
      // NOTE(review): `df1` and `properties` are built but never used below;
      // they appear intended for the commented-out DataFrameWriter.jdbc
      // alternative — confirm intent before removing.
      val df1 = sc.makeRDD(List((9, "刘天天", 24, "大数据2班")))
        .toDF("id", "name", "age", "grade")
      val properties = new Properties()
      properties.put("user", "root")
      properties.put("password", "123456")

      // Read a CSV file with a header row, letting Spark infer column types.
      val dfFirst = spark.read.format("csv")
        .option("sep", ",")
        .option("inferSchema", "true")
        .option("header", "true")
        .load("input/Employee_salary_first_half.csv")

      // Append the CSV rows to the MySQL `test` table.
      // FIX: the original ended with .saveAsTable("test"), which writes to the
      // session catalog (Hive metastore), NOT to MySQL. With format("jdbc") the
      // target table comes from the "dbtable" option and the write is executed
      // by .save(). Also added the "driver" option for consistency with the
      // read path above.
      dfFirst
        .write
        .mode(SaveMode.Append)
        .format("jdbc")
        .option("url", "jdbc:mysql://hadoop01:3306/student?useUnicode=true&characterEncoding=utf8&useSSL=false")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("user", "root")
        .option("password", "123456")
        .option("dbtable", "test")
        .save()
      // Equivalent shorthand using the Properties built above:
      // dfFirst.write.mode(SaveMode.Append)
      //   .jdbc("jdbc:mysql://hadoop01:3306/student?useUnicode=true&characterEncoding=utf8&useSSL=false",
      //     "test", properties)
    } finally {
      // Release local Spark resources even if a read/write fails.
      spark.stop()
    }
  }
}
