package spark.chap3

import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Demonstrates RDD `countByValue` / `countByKey` aggregation and a typed
  * `DataFrame.map` over a CSV file read with an explicit schema.
  *
  * @author pinker on 2018/6/9
  */
object CountByValueDemo {
  // Scratch directory for Spark's local temp files.
  val localPath = "D:/spark/temp/"
  // Directory used as the Spark SQL warehouse location.
  val hivePath = "D:/spark/hive/"

  /**
    * Reads the people CSV, converts each `Row` into a typed [[Person]]
    * (upper-casing the gender column), and prints every record.
    *
    * @param spark active session used to read the CSV
    */
  def flatMapDemo(spark: SparkSession): Unit = {
    val datas = getDataFrame(spark)
    datas.printSchema()
    import spark.implicits._ // brings the Encoder for Person into scope
    datas.map(row => {
      val name = row.getString(0)
      val age = row.getInt(1)
      val gender = row.getString(2).toUpperCase
      val index = row.getInt(3)
      Person(name, age, gender, index)
    }).foreach(p => println(p))
  }

  /** Typed record mirroring the CSV schema: (name, age, gender, index). */
  final case class Person(name: String, age: Int, gender: String, index: Int)

  /**
    * Loads the sample people CSV with an explicit, nullable schema.
    *
    * Uses the built-in `"csv"` data source: the external
    * `"com.databricks.spark.csv"` package was merged into Spark core in 2.0,
    * so the short name avoids depending on the retired third-party artifact.
    *
    * @param spark active session used to read the file
    * @return the untyped DataFrame of people records
    */
  private def getDataFrame(spark: SparkSession): DataFrame = {
    val schema = StructType(Seq(
      StructField("name", StringType, nullable = true),
      StructField("age", IntegerType, nullable = true),
      StructField("gender", StringType, nullable = true),
      StructField("index", IntegerType, nullable = true)))
    spark.read
      .format("csv")
      .option("header", "false")
      .schema(schema)
      .load("src/main/resources/rddData/people.csv")
  }

  /** Entry point: builds a local session, runs the demo, and always stops it. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("countByValue")
      .master("local[*]")
      .config("spark.local.dir", localPath)
      .config("spark.sql.warehouse.dir", hivePath)
      .getOrCreate()
    try {
      countByValueWithRDD(spark)
      //    flatMapDemo(spark)
    } finally {
      spark.stop() // release the local context even if the demo throws
    }
  }

  /**
    * Prints occurrence counts for small in-memory RDDs:
    * `countByValue` on plain ints and on (key, value) pairs, then
    * `countByKey` on the pairs.
    */
  private def countByValueWithRDD(spark: SparkSession): Unit = {
    val datas = spark.sparkContext.parallelize(Seq(1, 3, 4, 6, 8, 3, 6, 1, 6, 5))
    datas.countByValue().foreach(println)
    val tupDatas = spark.sparkContext.parallelize(Seq((1, "cool"), (3, "good"), (5, "bad"), (3, "bad")))
    tupDatas.countByValue().foreach(println)
    tupDatas.countByKey().foreach(println)
  }
}
