package com.xiaohu.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo4SourceAPI {
  /**
   * Demo of the Spark SQL DataSource API: reading and writing csv, json,
   * parquet, orc and jdbc sources. All examples except the JDBC read are
   * kept as commented-out reference snippets.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("data source api")
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    /**
     * Import implicit conversions and the SQL function library.
     */
    import org.apache.spark.sql.functions._
    import sparkSession.implicits._

    /**
     * ==========================================Reading/writing csv data==========================================
     */
    // Calling the csv() shortcut directly gives no chance to set a schema.
    //    val df1: DataFrame = sparkSession.read
    //      .csv("spark/data/test1.csv")
    //    // Reading via format() allows setting the schema at the same time.
    //    val df2: DataFrame = sparkSession.read
    //      .format("csv")
    //      .schema("id STRING,name STRING,age INT")
    //      .load("spark/data/test1.csv")
    //    df2.show()

    //    val df1: DataFrame = sparkSession.read
    //      .format("csv")
    //      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
    //      .option("sep", ",")
    //      .load("spark/data/students.txt")
    //
    //    df1.createOrReplaceTempView("students")
    //
    //    val resDF1: DataFrame = sparkSession.sql(
    //      """
    //        |select
    //        |clazz,
    //        |count(1) as counts
    //        |from students
    //        |group by clazz
    //        |""".stripMargin)
    //    // Write the result back to a directory in csv format.
    //    resDF1.write
    //      .format("csv")
    ////      .option("sep",",")
    //      .mode(SaveMode.Overwrite)
    //      .save("spark/data/sqlout4")

    /**
     * ==========================================Reading/writing json data==========================================
     */
    //    val df1: DataFrame = sparkSession.read
    //      .json("spark/data/students.json")
    //
    //    df1.groupBy("age")
    //      .agg(count("age") as "counts")
    //      .write
    //      .json("spark/data/sqlout5")

    /**
     * ==========================================Reading/writing parquet data==========================================
     *
     * The on-disk size of a parquet file is driven by the data's
     * information entropy (columnar encoding + compression).
     */
    //    val df1: DataFrame = sparkSession.read
    //      .json("spark/data/students2.json")
    //
    //    // Write out in parquet format.
    //    df1.write
    //      .parquet("spark/data/sqlout7")

    // Read parquet-formatted data back in.
    //    val df2: DataFrame = sparkSession.read
    //      .parquet("spark/data/sqlout7/part-00000-23f5482d-74d5-4569-9bf4-ea0ec91e86dd-c000.snappy.parquet")
    //    df2.show()

    /**
     * ==========================================Reading/writing orc data==========================================
     *
     */
//    val df1: DataFrame = sparkSession.read
//      .json("spark/data/students2.json")
//    df1.write
//      .orc("spark/data/sqlout8")

//    sparkSession.read
//      .orc("spark/data/sqlout8/part-00000-a33e356c-fd1f-4a5e-a87f-1d5b28f6008b-c000.snappy.orc")
//      .show()


    /**
     * ==========================================Reading data over jdbc==========================================
     *
     */
    // SECURITY NOTE: the connection credentials below are hard-coded for demo
    // purposes only — in real code load them from configuration or a secret store.
    try {
      sparkSession.read
        .format("jdbc")
        .option("url", "jdbc:mysql://192.168.44.100:3306/studentdb?useUnicode=true&characterEncoding=UTF-8&useSSL=false")
        .option("dbtable", "studentdb.jd_goods")
        .option("user", "root")
        .option("password", "123456")
        .load()
        .show(10, truncate = false)
    } finally {
      // Fix: the session was never stopped, leaking the SparkContext and its
      // resources (threads, UI port, local dirs). Always stop it on exit.
      sparkSession.stop()
    }
  }
}
