package dataframe

import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object DataFrame_WriterTest {

  /**
   * Demo of DataFrame write paths:
   *   1. filter a JSON-sourced DataFrame and save it as CSV,
   *   2. reload the CSV output as an RDD of lines and print it,
   *   3. build a DataFrame from in-memory rows and write it to MySQL via JDBC.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("DataFrame_WriterTest")

    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()

    // Save the DataFrame as CSV files. Overwrite mode so re-running the job
    // does not fail with "path dfOutput already exists" (the default mode is
    // ErrorIfExists).
    val df: DataFrame = spark.read.json("data/people.json")
    df.filter(col("age").isNotNull)
      .write
      .mode(SaveMode.Overwrite)
      .format("csv")
      .save("dfOutput")

    // Reload the saved CSV data as an RDD of lines and print it.
    // collect() first brings the rows to the driver: a bare foreach(println)
    // would print on the executors' stdout, not the driver's, when running
    // on a cluster.
    val textFileRDD: RDD[String] = spark.sparkContext.textFile("dfOutput/*")
    textFileRDD.collect().foreach(println)

    // Build a small DataFrame from in-memory "id,name,gender,age" rows for
    // the JDBC demo. trim before toInt guards against stray whitespace.
    val peopleRowsRDD: RDD[Row] = spark.sparkContext.parallelize(Seq(
      "3,王老五,F,44", "4,赵小虎,M,27"
    ))
      .map { line =>
        val fields = line.split(",")
        Row(fields(0).trim.toInt, fields(1).trim, fields(2).trim, fields(3).trim.toInt)
      }

    val schema: StructType = new StructType(Array(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = false),
      StructField("gender", StringType, nullable = true),
      StructField("age", IntegerType, nullable = true)
    ))
    val peopleDF: DataFrame = spark.createDataFrame(peopleRowsRDD, schema)

    // Write the DataFrame to the MySQL table "student".
    // NOTE(review): "ip地址" in the URL is a placeholder ("IP address") —
    // replace it with the real host before running. Credentials are
    // hard-coded for demo purposes only; load them from configuration in
    // production code.
    val jdbcUrl =
      "jdbc:mysql://ip地址:3306/spark?useSSL=false&useUnicode=true&characterEncoding=utf8"
    val properties = new Properties()
    properties.setProperty("driver", "com.mysql.jdbc.Driver")
    properties.setProperty("user", "root")
    properties.setProperty("password", "123456")

    // Overwrite drops and recreates the target table on each run.
    peopleDF.write.mode(SaveMode.Overwrite).jdbc(jdbcUrl, "student", properties)

    spark.stop()
  }
}
