package org.example

import org.apache.spark.sql.SparkSession

import java.util.Properties

/**
 * Minimal Spark JDBC demo: reads the `spark` table from a local MySQL
 * database, prints it, and registers it as a temp view for Spark SQL.
 * A commented-out section below shows how rows can be written back.
 */
object JDBC {

  def main(args: Array[String]): Unit = {
    // Create the Spark session (local mode, using all available cores).
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    try {
      // JDBC connection properties for MySQL.
      // NOTE(review): credentials are hard-coded — move to configuration/env.
      val properties = new Properties()
      properties.setProperty("user", "root")
      properties.setProperty("password", "123456")
      // Legacy driver class name; with MySQL Connector/J 8.x use
      // "com.mysql.cj.jdbc.Driver" (the old name is a deprecated alias).
      properties.setProperty("driver", "com.mysql.jdbc.Driver")

      val url =
        "jdbc:mysql://localhost:3306/test?verifyServerCertificate=false&useSSL=false"

      // Read the `spark` table, show it, and expose it as a temp view so it
      // can be queried with Spark SQL.
      val mysqlScore = spark.read.jdbc(url, "spark", properties)
      mysqlScore.show()
      mysqlScore.createTempView("spark")

      // Import routine grades with Navicat, then read them in Spark and
      // compute the average score.
      // Writing data back to MySQL (rows / columns) — example kept disabled:
      /*
      val data: RDD[String] = spark.sparkContext.makeRDD(Array("张三,1001,100", "李四,1002,99"))
      // Convert to the table structure:
      // 1. Split each record on the MySQL column separator.
      val dataRDD = data.map(_.split(","))
      // 2. Map fields onto the Score case class.
      val scoreRDD = dataRDD.map(x => Score(x(0), x(1), x(2)))
      // 3. Convert the RDD to a DataFrame.
      import spark.implicits._
      val dataDF = scoreRDD.toDF()
      dataDF.write.mode("append").jdbc(url, "spark", properties)
      mysqlScore.show()
      */

      // TODO: read the routine-grade CSV file and write it into the MySQL
      // `spark` table.
    } finally {
      // Stop the session even if the JDBC read fails; this also stops the
      // underlying SparkContext.
      spark.stop()
    }
  }

  /** One score record: student name, student number, score (all as strings). */
  final case class Score(name: String, number: String, score: String)
}
