package com.shujia.spark.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demonstrates `SparkSession` as the unified entry point (since Spark 2.x)
 * by running the same word-count twice: once with the RDD (Spark Core) API
 * and once with Spark SQL over a temporary view.
 */
object Demo01SparkSessio {
  def main(args: Array[String]): Unit = {
    // Unified entry point introduced in Spark 2: covers Core, SQL, Streaming and MLlib.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo01SparkSessio")
      .master("local")
      // Additional settings go through config(), e.g. default RDD parallelism:
      //      .config("spark.default.parallelism","2")
      // Keep the number of SQL shuffle partitions small for a local demo (default is 200).
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    // --- Word count via the RDD (Spark Core) API ---
    val lineRDD: RDD[String] = spark.sparkContext.textFile("spark/data/words/*")

    lineRDD
      .flatMap(_.split(","))
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      // NOTE: println executes on the executors; output is only visible here
      // because master is "local".
      .foreach(println)

    // --- Word count via the SQL API ---
    // Read each raw line into a single "line" column; the separator "#" is
    // chosen because it does not occur in the data, so the CSV reader never
    // splits a line into multiple fields.
    val lineDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "#")
      .schema("line String")
      .load("spark/data/words/")

    lineDF.createOrReplaceTempView("lineTB")

    spark.sql(
        """
          |select  t1.word
          |        ,count(*) as cnt
          |from (
          |    select  explode(split(line,",")) as word
          |    from lineTB
          |) t1 group by t1.word
          |""".stripMargin)
      .show()

    // Fix: release the session's resources (SparkContext, UI, thread pools).
    // The original demo never stopped the session.
    spark.stop()
  }
}
