package spark

import org.apache.spark.SparkContext
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{SQLContext, SparkSession}
import spark.DataFrameDemo.{hivePath, localPath}

/**
  * Demo of RDD caching semantics and CSV read/write with Spark.
  *
  * @author pinker on 2018/6/7
  */
object CacheDemo {

  /**
    * Entry point: builds a local SparkSession, computes the cartesian
    * product of two small RDDs and writes it out as text files.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[5]")
      .appName("rdd")
      .config("spark.local.dir", localPath)
      .config("spark.sql.warehouse.dir", hivePath)
      .getOrCreate()
    // Tests whether cache() forces computation without an action.
    // It does not (cache is lazy); the book appears to be wrong here.
    // cacheTest(spark)
    val sc = spark.sparkContext
    val left = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val right = sc.parallelize(Array(6, 5, 4, 3, 2, 1))
    // Cartesian product of the two RDDs (36 pairs in total).
    val product = left.cartesian(right)
    // NOTE: saveAsTextFile creates a *directory* named cartesian1.csv
    // containing part files — it does not write a single CSV file.
    product.saveAsTextFile("src/main/resources/rddData/cartesian1.csv")
    // Release the local Spark context and its worker threads.
    spark.stop()
  }

  /**
    * Shows that cache() is lazy: the second foreach still triggers a
    * fresh evaluation because no action ran between cache() and it.
    *
    * @param spark active SparkSession providing the SparkContext
    */
  private def cacheTest(spark: SparkSession) = {
    val datas = spark.sparkContext.parallelize(Seq("abc", "d", "e", "f"), 2)
    datas.foreach(println)
    println("--------------")
    // cache() only marks the RDD for storage; nothing is computed here.
    datas.cache()
    datas.foreach(println)
  }

  /**
    * Standard CSV read/write demo.
    *
    * Originally used the deprecated SQLContext together with the external
    * com.databricks.spark.csv package; since Spark 2.x ships a native CSV
    * data source, this uses the SparkSession's built-in "csv" format.
    *
    * @param sc active SparkContext used to obtain the SparkSession
    */
  def csvDemo(sc: SparkContext): Unit = {
    // Reuse (or build) the SparkSession tied to this SparkContext instead
    // of instantiating the deprecated SQLContext.
    val spark = SparkSession.builder().config(sc.getConf).getOrCreate()
    val customSchema = StructType(Array(
      StructField("year", IntegerType, nullable = true),
      StructField("make", StringType, nullable = true),
      StructField("model", StringType, nullable = true),
      StructField("comment", StringType, nullable = true),
      StructField("blank", StringType, nullable = true)))

    val df = spark.read
      .format("csv")
      .option("header", "true") // use first line of each file as header
      .schema(customSchema)
      .load("cars.csv")

    val selectedData = df.select("year", "model")
    selectedData.write
      .format("csv")
      .option("header", "true")
      .save("newcars.csv")
  }
}
