mysql> create database sparktest;
mysql> use sparktest;
mysql> create table employee (id int(4), name char(20), gender char(4), age int(4));
mysql> insert into employee values(1,'Alice','F',22);
mysql> insert into employee values(2,'John','M',25);
$ mkdir -p src/main/scala
$ vim src/main/scala/testmysql.scala
import org.apache.spark.sql.SparkSession
import java.util.Properties
import org.apache.spark.sql.types._
import org.apache.spark.sql.Row
object TestMySQL {
  /**
   * Appends two new rows to the MySQL table `sparktest.employee` over JDBC,
   * then reads the table back and prints the max and sum of the `age` column.
   *
   * Assumes a MySQL server on localhost:3306 with database `sparktest` and
   * table employee(id, name, gender, age), reachable as root/hadoop.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("Simple Application")
      .getOrCreate()

    // Two new employee records as whitespace-separated fields: id name gender age.
    val employeeRDD = spark.sparkContext
      .parallelize(Array("3 Mary F 26", "4 Tom M 23"))
      .map(_.split(" "))

    // Schema mirroring the MySQL table definition.
    val schema = StructType(List(
      StructField("id", IntegerType, true),
      StructField("name", StringType, true),
      StructField("gender", StringType, true),
      StructField("age", IntegerType, true)))

    val rowRDD = employeeRDD.map(p => Row(p(0).toInt, p(1).trim, p(2).trim, p(3).toInt))
    val employeeDF = spark.createDataFrame(rowRDD, schema)

    // JDBC connection properties for the MySQL instance.
    val prop = new Properties()
    prop.put("user", "root")
    prop.put("password", "hadoop")
    prop.put("driver", "com.mysql.jdbc.Driver")

    // Fix: the table-name argument was missing its opening quote, which was a
    // compile error ( sparktest.employee" ).
    employeeDF.write.mode("append")
      .jdbc("jdbc:mysql://localhost:3306/sparktest", "sparktest.employee", prop)

    // Read the table back and display the aggregates. Without show() the
    // aggregation was built lazily and never executed, so nothing was printed.
    val jdbcDF = spark.read.format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/sparktest")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", "employee")
      .option("user", "root")
      .option("password", "hadoop")
      .load()
    jdbcDF.agg("age" -> "max", "age" -> "sum").show()

    // Release Spark resources before exiting.
    spark.stop()
  }
}
name := "Simple Project"
version := "1.0"
scalaVersion := "2.12.15"
libraryDependencies += "org.apache.spark" %% "spark-core" % "3.2.0"
libraryDependencies += "org.apache.spark" %% "spark-sql" % "3.2.0"
libraryDependencies += "mysql" % "mysql-connector-java" % "5.1.40"
$ /usr/local/sbt/sbt package
$ /usr/local/spark/bin/spark-submit --class "TestMySQL" --jars /usr/local/spark/jars/mysql-connector-java-5.1.40-bin.jar /usr/local/spark/mycode/testmysql/target/scala-2.12/simple-project_2.12-1.0.jar



