package spark

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

/**
  * @author pinker on 2018/6/9
  */
object CoalesceDemo {
  // Scratch directory for Spark shuffle/spill files (spark.local.dir).
  val localPath = "D:/spark/temp/"
  // Warehouse directory backing spark.sql.warehouse.dir.
  val hivePath = "D:/spark/hive/"

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("coalesce")
      .config("spark.local.dir", localPath)
      .config("spark.sql.warehouse.dir", hivePath)
      .getOrCreate()

    try {
      coalesceDemo(spark)
    } finally {
      // Always release the local SparkContext and its worker threads.
      spark.stop()
    }
  }

  /**
    * Reads a CSV of people, then demonstrates how `repartition`, `filter`
    * and `coalesce` affect the number of underlying RDD partitions.
    *
    * @param spark the active session used to read the CSV file
    */
  private def coalesceDemo(spark: SparkSession): Unit = {
    // Every column is nullable: the CSV file may contain missing values.
    val schema = StructType(Seq(
      StructField("name", StringType, true),
      StructField("age", IntegerType, true),
      StructField("gender", StringType, true),
      StructField("index", IntegerType, true)))

    // "csv" is the built-in reader since Spark 2.0; it replaces the external
    // com.databricks.spark.csv package the original code referenced.
    val datas = spark.read
      .format("csv")
      .option("header", "false")
      .schema(schema)
      .load("src/main/resources/rddData/people.csv")

    import spark.implicits._
    // Print each row as one tab-separated line on the driver.
    // (The original `row.formatted("\t")` formatted the literal "\t" and
    // ignored the row, and `foreach(str => println)` discarded `str` and
    // called the zero-arg println() — producing only blank lines.)
    datas.map(row => row.mkString("\t")).collect().foreach(println)

    println("原始分区" + datas.rdd.getNumPartitions)
    val datas1 = datas.repartition(4)
    println("repartitions 后的分区" + datas1.rdd.getNumPartitions)
    datas1.printSchema()

    // Keep rows with age > 20. Guard against null ages first: calling
    // getInt on a null cell throws a NullPointerException.
    val datas2 = datas1.filter(row => {
      println(row.get(1) + "-" + row.get(2) + row.get(3))
      !row.isNullAt(1) && row.getInt(1) > 20
    })
    datas2.collect().foreach(println)

    // coalesce shrinks the partition count without a full shuffle.
    val datas3 = datas2.coalesce(2)
    println("coalesce后的大小" + datas3.rdd.getNumPartitions)
  }
}
