package spark

import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.types._
import spark.DataFrameDemo.{hivePath, localPath}

/**
  * Demonstrates reading and writing plain-text, JSON and CSV files with Spark.
  *
  * All paths are relative to the project root. Write demos produce output
  * directories containing multiple part files, as is standard for Spark sinks.
  *
  * @author pinker on 2018/6/7
  */
object FileTypeDemo {
  def main(args: Array[String]): Unit = {
    // Local session with 5 worker threads; localPath / hivePath are shared
    // constants imported from DataFrameDemo.
    val spark = SparkSession
      .builder()
      .master("local[5]")
      .appName("rdd")
      .config("spark.local.dir", localPath)
      .config("spark.sql.warehouse.dir", hivePath)
      .getOrCreate()
    try {
      //    textDemo(spark)
      //    jsonDemo(spark)
      // Sample CSV row: DS    ,98,f     ,5
      csvDemo(spark)
    } finally {
      // Always release the local cluster resources, even if a demo throws.
      spark.stop()
    }
  }

  /**
    * Reads a header-less CSV with an explicit schema, then re-writes it
    * with a header line.
    */
  private def csvDemo(spark: SparkSession): Unit = {
    // Explicit schema because the source file carries no header to infer from.
    val schema = StructType(Seq(
      StructField("name", StringType, true),
      StructField("age", IntegerType, true),
      StructField("gender", StringType, true),
      StructField("index", IntegerType, true)))
    // Built-in "csv" source (Spark 2.x+); "com.databricks.spark.csv" is the
    // legacy external-package alias and is no longer needed.
    val data = spark.read.format("csv")
      .option("header", "false")
      .schema(schema)
      .load("src/main/resources/rddData/people.csv")
    // Append mode: re-running adds new part files instead of failing on an
    // existing output path. The .csv(...) sink call sets the format itself.
    data.write
      .option("header", "true")
      .mode(SaveMode.Append)
      .csv("src/main/resources/rddData/peopleWithHeader.csv")
  }

  /**
    * Round-trips data through JSON: CSV -> JSON on disk -> read back -> JSON again.
    */
  private def jsonDemo(spark: SparkSession): Unit = {
    val data = spark.read.format("csv")
      .option("header", "false") // no header: columns get default names (_c0, _c1, ...)
      .load("src/main/resources/rddData/people.csv")
    data.write.json("src/main/resources/rddData/people1.json")
    val jsonData = spark.read.json("src/main/resources/rddData/people1.json")
    jsonData.write.json("src/main/resources/rddData/people2.json")
  }

  /**
    * RDD-level text I/O (textFile / wholeTextFiles / saveAsTextFile) followed
    * by the more common DataFrame-based text write.
    */
  private def textDemo(spark: SparkSession): Unit = {
    val lines = spark.sparkContext.textFile("src/main/resources/rddData/people.csv")
    lines.foreach(println)
    // wholeTextFiles yields (filePath, fileContent) pairs, one per file.
    val wholeFiles = spark.sparkContext.wholeTextFiles("src/main/resources/rddData/cartesian.csv")
    wholeFiles.foreach(println)

    // Spark treats the given path as a directory and writes multiple part
    // files under it; for plain text output there is no control over which
    // records land in which part file (some other formats do support this).
    wholeFiles.saveAsTextFile("src/main/resources/rddData/cartesian2.csv")

    // DataFrame-style text write — the commonly used API.
    import spark.implicits._
    val df = spark.createDataset(Seq(1 to 10: _*)).map(_.toString).toDF()
    df.write.text("src/main/resources/rddData/cartesian.txt")
  }
}
