package com.shujia.sql

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object DEmo8Dataset {

  /**
   * Word-count example demonstrating the Dataset API.
   *
   * A DataFrame is just `Dataset[Row]` underneath, and a Dataset is backed by
   * an RDD; the three can be converted into one another. A Dataset supports
   * both the RDD-style operators (map/flatMap/...) and the DSL API.
   */
  def main(args: Array[String]): Unit = {

    // 1. Create the Spark SQL session
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("sql")
      .config("spark.sql.shuffle.partitions", 1) // default is 200 on a cluster
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read the input as a single STRING column named "line".
    // The "|" separator keeps comma-containing lines intact in one column
    // (the real word separator, ",", is handled below).
    val linesDF: DataFrame = spark.read
      .schema("line STRING")
      .option("sep", "|")
      .csv("spark/data/words.txt")

    // Split each line into words: one input row -> many output rows.
    // NOTE: blank lines come back from the CSV source as null, which does
    // not match the `line: String` type pattern; without the fall-through
    // case the job would die with a scala.MatchError. Skip such rows.
    val wordsDS: Dataset[String] = linesDF
      .flatMap {
        case Row(line: String) => line.split(",").toSeq
        case _                 => Seq.empty[String] // null / malformed row: skip
      }

    // Back to a DataFrame with an explicit column name
    val wordsDF: DataFrame = wordsDS.toDF("word")

    // Word count via the DSL API
    wordsDF
      .groupBy($"word")
      .agg(count($"word"))
      .show()
  }
}
