package chapter05

import java.util.Properties

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}

object DataSourceApp {
  def main(args: Array[String]): Unit = {
    // appName/master are intentionally left unset here so spark-submit can
    // supply them at deploy time (e.g. when running on YARN).
    val session: SparkSession = SparkSession.builder()
//      .appName("DataSourceApp")
//      .master("local")
      .getOrCreate()

    // Earlier exercises, kept for reference:
    //text(session)
    //json(session)
    //common(session)
    //parquet(session)
    //convert(session)
    //jdbc(session)
    jdbc2(session)

    session.stop()
  }

  //jdbc
  /**
   * Reads a JDBC table using connection settings from application.conf
   * (Typesafe Config, keys under `db.default.*`), prints it, and copies it
   * into the configured sink table.
   */
  def jdbc2(spark: SparkSession): Unit = {
    val config: Config = ConfigFactory.load()
    val driver: String = config.getString("db.default.driver")
    val url: String = config.getString("db.default.url")
    val user: String = config.getString("db.default.user")
    val password: String = config.getString("db.default.password")
    val database: String = config.getString("db.default.database")
    val table: String = config.getString("db.default.table")
    val sinkTable: String = config.getString("db.default.sink.table")

    val properties = new Properties()
    properties.put("user", user)
    properties.put("password", password)
    // Without an explicit driver class the job fails when submitted to YARN
    // with "java.sql.SQLException: No suitable driver": DriverManager cannot
    // auto-discover the JDBC driver on executor classpaths, so name it here.
    properties.put("driver", driver)

    val jdbc2DF: DataFrame = spark.read
      .jdbc(url, s"$database.$table", properties)
    jdbc2DF.show()

    // Default SaveMode is ErrorIfExists: the write fails if the sink table
    // already exists, which guards against accidental overwrites.
    jdbc2DF.write
      .jdbc(url, s"$database.$sinkTable", properties)
  }

  //jdbc
  /**
   * Demonstrates two ways of reading over JDBC.
   * NOTE(review): credentials are hard-coded for the demo only.
   */
  def jdbc(spark: SparkSession): Unit = {
    // Approach 1: generic DataSource API — format("jdbc") plus options.
    import spark.implicits._
    //    val jdbc1DF: DataFrame = spark.read
    //      .format("jdbc")
    //      .option("url", "jdbc:mysql://hadoop000:3306")
    //      .option("dbtable", "myhive.members")
    //      .option("user", "root")
    //      .option("password", "123456")
    //      .load()
    //    jdbc1DF.filter($"username" === "lisi").show()

    // Approach 2: the jdbc() shortcut taking a java.util.Properties bag.
    val connProps = new Properties()
    connProps.setProperty("user", "root")
    connProps.setProperty("password", "123456")
    val membersDF: DataFrame = spark.read
      .jdbc("jdbc:mysql://hadoop000:3306?useUnicode=true&characterEncoding=utf-8&useSSL=false", "myhive.members", connProps)
    //membersDF.show()

    //    membersDF.write
    //      .jdbc("jdbc:mysql://hadoop000:3306","myhive.members1",connProps)
  }

  //存储类型转化
  /**
   * Storage format conversion: read JSON, rewrite it as Parquet, then read
   * the Parquet output back and show it.
   */
  def convert(spark: SparkSession): Unit = {
    val jsonDF: DataFrame = spark.read
      .format("json")
      .load("file:///D:\\JAVApros\\spark_pk\\data\\people.json")

    // Overwrite so reruns do not fail on an existing output directory.
    jsonDF.write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .save("out")

    //    spark.read
    //      .parquet("file:///D:\\JAVApros\\spark_pk\\out").show()
    // NOTE(review): the write targets the relative path "out" while the read
    // uses an absolute path — these only line up when the working directory
    // is D:\JAVApros\spark_pk. TODO confirm / unify the two paths.
    spark.read
      .format("parquet")
      .load("file:///D:\\JAVApros\\spark_pk\\out").show()

  }

  //parquet
  /**
   * Parquet read/write: load a sample file, project two columns, and write
   * them back out (compressed with Spark's default codec).
   */
  def parquet(spark: SparkSession): Unit = {
    val usersDF: DataFrame = spark.read
      .format("parquet")
      .load("file:///D:\\JAVApros\\spark_pk\\data\\users.parquet")

    //    usersDF.printSchema()
    //    usersDF.show()

    usersDF
      .select("name", "favorite_numbers")
      .write
      .mode("overwrite")
      //.option("compression","none") // disables compression
      .parquet("out")

    // Reading one output part file back, kept for reference:
    //    val snappyParquetDF: DataFrame = spark.read
    //      .format("parquet")
    //      .load("file:///D:\\JAVApros\\spark_pk\\out\\part-00000-fa949bd3-b954-44d7-b811-8654949294ec-c000.snappy.parquet")
    //
    //    snappyParquetDF.show()
  }

  //dataSource标准写法
  /**
   * Canonical DataSource API usage: read with format(...).load(...) and
   * write with write.format(...).mode(...).save(...).
   */
  def common(spark: SparkSession): Unit = {
    import spark.implicits._
    // "In front of the source code, there are no secrets."
    val peopleTextDF: DataFrame = spark.read
      .format("text")
      .load("file:///D:\\JAVApros\\spark_pk\\data\\people.txt")
    val peopleJsonDF: DataFrame = spark.read
      .format("json")
      .load("file:///D:\\JAVApros\\spark_pk\\data\\people.json")
    //    peopleTextDF.show()
    //    peopleJsonDF.show()
    peopleJsonDF.write
      .format("json")
      .mode("overwrite")
      .save("out")

  }

  //json
  /**
   * JSON read/write, including flattening nested fields into top-level
   * columns before writing.
   */
  def json(spark: SparkSession): Unit = {
    import spark.implicits._
    val peopleDF: DataFrame = spark.read.json("file:///D:\\JAVApros\\spark_pk\\data\\people.json")
    //peopleDF.show()

    //TODO keep only rows where age is greater than 20
    //peopleDF.filter("age>20").select('name).write.mode(SaveMode.Overwrite).json("out")

    val nestedDF: DataFrame = spark.read.json("file:///D:\\JAVApros\\spark_pk\\data\\people2.json")
    // Promote the nested info.work / info.home fields to flat columns.
    nestedDF
      .select($"name", $"age", $"info.work".as("work"), $"info.home".as("home"))
      .write
      .mode(SaveMode.Overwrite)
      .json("out")
  }

  //文本
  /**
   * Text source demo: each input line "name, age" is rebuilt as "name,age"
   * and written back out. The text sink supports only a single string
   * column, so both fields are joined into one string before writing.
   */
  def text(spark: SparkSession): Unit = {
    import spark.implicits._
    val textDf: DataFrame = spark.read.text("file:///D:\\JAVApros\\spark_pk\\data\\people.txt")
    //textDf.show()

    val result: Dataset[String] = textDf.map { row =>
      // A text source exposes one string column per line.
      // (Removed a discarded tuple expression whose only effect was a
      // pointless `.toInt` that could throw on non-numeric input.)
      val fields: Array[String] = row.getString(0).split(",")
      s"${fields(0).trim},${fields(1).trim}"
    }

    // SaveMode.Overwrite replaces any existing "out" directory on rerun.
    result.write.mode(SaveMode.Overwrite).text("out")
  }

}
