package cn.jly.bigdata.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * @author lanyangji
 * @date 2019/12/1 21:34
 */
/**
 * Demonstrates Spark SQL's generic load/save API:
 * reading JSON via `format(...).load(...)`, writing/reading CSV,
 * and running SQL directly against a file path.
 */
object SparkSql06_Load_Save {

  def main(args: Array[String]): Unit = {

    // Build a local SparkSession for the demo.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("SparkSql06_Load_Save")
    val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // The default data source format is Parquet; anything else must be
    // named explicitly via format(). Equivalent shortcut: session.read.json(...)
    val peopleDF: DataFrame = session.read.format("json").load("input/people.json")
    peopleDF.show()

    // Parse the same JSON file line-by-line through a plain RDD,
    // using the (legacy) scala.util.parsing JSON parser.
    val rawLines: RDD[String] = session.sparkContext.textFile("input/people.json")
    import scala.util.parsing.json.JSON
    rawLines
      .map(line => JSON.parseFull(line))
      .foreach(parsed => println(parsed))

    // Write the DataFrame out as CSV, appending if output already exists.
    // (To save as Parquet instead: write.format("parquet").save("out/people.parquet"))
    peopleDF.write.format("csv").mode(SaveMode.Append).save("out/people.csv")

    // Read the CSV back in to verify the round trip.
    val reloaded: DataFrame = session.read.format("csv").load("out/people.csv")
    reloaded.show()

    // SQL can be run directly on a file path using the format.`path` syntax.
    session.sql("SELECT * FROM json.`input/people.json`").show()

    // Release all resources held by the session.
    session.close()
  }
}
