package com.offcn.bigdata.sql.p1

import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

/**
  * Unified data loading and saving.
  *     Load:  read.load
  *     Save:  write.save
  *         The default file format for load and save is parquet.
  */
object _04UniformDataLoadAndSaveOps {
    def main(args: Array[String]): Unit = {
        // Local SparkSession for this demo.
        val spark = SparkSession.builder()
            .appName("_04UniformDataLoadAndSaveOps")
            .master("local[*]")
            .getOrCreate()

        // ---- load ----
        // The same variable is reassigned on purpose: each line showcases one reader API.
        var frame = spark.read
            .format("json") // explicitly pick the input format
            .load("file:/E:/data/spark/sql/people.json")
        frame = spark.read.json("file:/E:/data/spark/sql/people.json")
        frame = spark.read.parquet("file:/E:/data/spark/sql/sqldf.parquet")
        frame = spark.read.text("file:/E:/data/spark/sql/people.txt")
        frame = spark.read.csv("file:/E:/data/spark/sql/country.csv").toDF("id", "country", "shorthand")
        frame = spark.read
            .option("header", true) // first row carries column names
            .option("sep", "|")     // pipe-delimited file
            .csv("file:/E:/data/spark/sql/location-info.csv")

        // JDBC source. NOTE(review): credentials are hard-coded for the demo;
        // in real code load them from configuration, never from source.
        val url = "jdbc:mysql://10.10.63.80:3306/test"
        val properties = new Properties()
        properties.setProperty("user", "root")
        properties.setProperty("password", "sorry")
        frame = spark.read.jdbc(url, "user", properties)
        frame.show()

        println("--------------------save-----------------------")
        /*
            The four save modes:
               ErrorIfExists (default)
               Append
               Ignore    : create the target if it does not exist; if it exists, skip the write
               Overwrite
         */
        frame.write.mode(SaveMode.Ignore).csv("file:/E:/out/spark/sql")
        spark.stop()
    }
}
