package cn.doitedu.day08

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, KeyValueGroupedDataset, SparkSession}

object T03_DataSetDemo {

  /**
   * Word-count demo mixing the typed Dataset API (flatMap/map) with the
   * untyped DataFrame API (groupBy/agg/orderBy).
   *
   * Reads "data/words.txt", splits lines into words, counts occurrences,
   * and prints the result sorted by count descending.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName(this.getClass.getSimpleName)
      .master("local[4]")
      .getOrCreate()

    try {
      // Each element of the Dataset is one line of the input file.
      val lines: Dataset[String] = spark.read.textFile("data/words.txt")

      // spark.implicits._ provides the Encoders required by the typed
      // transformations below, plus the $"..." column interpolator;
      // functions._ supplies aggregate functions such as sum.
      import spark.implicits._
      import org.apache.spark.sql.functions._

      val words: Dataset[String] = lines.flatMap(_.split(" "))

      // Pair each word with 1 and name the tuple columns in a single step
      // (equivalent to the two withColumnRenamed calls, but clearer).
      val wordAndOne: DataFrame = words.map((_, 1)).toDF("word", "count")

      wordAndOne.groupBy("word")
        .agg(
          sum("count") as "counts"
        )
        .orderBy($"counts".desc) // .desc instead of deprecated postfix `desc`
        .show()
    } finally {
      // The original leaked the session; always release cluster resources.
      spark.stop()
    }
  }
}
