package sparkSql

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import sparkSql.SparkSQL2.personObj

/**
 * Reads a comma-separated text file into an RDD of `personObj`,
 * converts it to a DataFrame, prints it, and persists it to the
 * MySQL table `HUEL.test1` via the JDBC data source.
 *
 * NOTE(review): credentials and file path are hard-coded — fine for a
 * demo, but should come from configuration in real code.
 */
object mysqlSpark {

  def main(args: Array[String]): Unit = {

    // 1) Create the Spark configuration and context
    val conf = new SparkConf().setMaster("local[*]").setAppName("helloSql")
    val sc = new SparkContext(conf)

    // 2) Create the Spark SQL context and reduce log noise.
    //    Spark validates the level against an uppercase list
    //    (ALL, DEBUG, ERROR, ...); lowercase "error" can throw
    //    IllegalArgumentException on newer versions.
    val sqlCon = new SQLContext(sc)
    sc.setLogLevel("ERROR")

    // 3) Read the input text file (one person per line)
    val lines = sc.textFile("E:\\new.txt")

    // Split each line on commas and map to a personObj RDD.
    // Expected layout per line: name,age,<int field> — fields(1) and
    // fields(2) must parse as integers, or this task will fail.
    val personListRdd = lines.map(line => {
      val fields = line.split(",")
      personObj(fields(0), fields(1).toInt, fields(2).toInt)
    })

    // 4) Convert the RDD to a DataFrame and show it
    import sqlCon.implicits._
    val personDF = personListRdd.toDF
    personDF.show()

    // 5) Write the DataFrame to MySQL over JDBC.
    //    Default SaveMode is ErrorIfExists: the job fails if table
    //    `test1` already exists — intentional here to avoid clobbering.
    personDF.write.format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/HUEL?characterEncoding=UTF-8&useSSL=false")
      .option("dbtable", "test1")
      .option("user", "root")
      .option("password", "123456")
      .save()

    // Release cluster resources before the JVM exits.
    sc.stop()
  }
}
