package org.example
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import java.util.Properties
object spark_MYSQL {
  def main(args: Array[String]): Unit = {
    // Local Spark session using all available cores.
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    val sc = spark.sparkContext

    // JDBC connection properties for MySQL.
    // NOTE(review): credentials are hard-coded — move to config/env for anything beyond a demo.
    val pro: Properties = new Properties()
    pro.setProperty("user", "root")
    pro.setProperty("password", "123456")
    pro.setProperty("driver", "com.mysql.jdbc.Driver") // MySQL Connector/J 8.0+ uses "com.mysql.cj.jdbc.Driver"

    // Read the `user` table of the `mysql` database into a DataFrame.
    val mysqlData = spark.read.jdbc(
      "jdbc:mysql://localhost:3306/mysql?verifyServerCertificate=false&useSSL=false",
      "user",
      pro)
    // mysqlData.show()

    // Load a GBK-encoded CSV (header row present) and append its rows to the `spark` table.
    val sparkScore = spark.read
      .option("encoding", "GBK")
      .option("header", "true")
      .csv("src/main/resources/23yun1.csv")
    val sparkDF = sparkScore.toDF("name", "number", "score")
    sparkDF.write.mode("append").jdbc(
      "jdbc:mysql://localhost:3306/test?verifyServerCertificate=false&useSSL=false",
      "spark",
      pro)

    // RDD -> DataFrame conversion, then write to MySQL.
    val data: RDD[String] = sc.makeRDD(Array("李四,2,60", "张三,1,100"))
    // 1. Split each CSV-style line into the columns expected by the MySQL table.
    val dataRDD = data.map(_.split(","))
    // 2. Map each split row onto the Score case class.
    val scoreRDD = dataRDD.map(x => Score(x(0), x(1), x(2)))
    // 3. Implicit conversion to a DataFrame (schema required for the JDBC write).
    import spark.implicits._
    val scoreDataFrame = scoreRDD.toDF()
    // Fix: use the canonical lowercase save-mode name; "Ignore" only worked
    // because DataFrameWriter.mode(String) parses the value case-insensitively.
    scoreDataFrame.write.mode("ignore").jdbc(
      "jdbc:mysql://localhost:3306/test?verifyServerCertificate=false&useSSL=false",
      "spark",
      pro)

    // Derived-column and projection examples.
    scoreDataFrame.withColumn("age", scoreDataFrame("number") + 20).show()
    scoreDataFrame.select("name", "score").show(5)
    // scoreDataFrame.show()

    // Fix: stop the whole SparkSession (not just the SparkContext) so every
    // session-scoped resource is released on shutdown.
    spark.stop()
  }

  // Row type for the hand-built RDD; all columns kept as String to match the CSV/JDBC usage above.
  case class Score(name: String, number: String, score: String)
}