package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo8DataSetAPI {

  /**
   * Word-count example contrasting the typed Dataset API (flatMap/map)
   * with the untyped DataFrame API (groupBy/agg) on the same data.
   *
   * Reads `data/words.txt` as a single-column CSV, splits each line into
   * words, and prints the per-word counts twice: once by grouping the
   * typed `(word, 1)` Dataset directly, once after converting it to a
   * DataFrame with named columns.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("hive")
      // keep shuffle output to a single partition for this small local demo
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      val linesDF: DataFrame = spark
        .read
        .format("csv")          // input format
        .option("sep", "|")     // field separator
        .schema("line STRING")  // column name and type
        .load("data/words.txt") // input path

      /**
       * Dataset API: split each line into words.
       * The fallback case guards against null lines (e.g. blank rows in the
       * CSV), which would otherwise throw a MatchError at runtime.
       */
      val wordsDS: Dataset[String] = linesDF.flatMap {
        case Row(line: String) => line.split(",").toSeq
        case _                 => Seq.empty[String] // skip null/malformed rows
      }

      // map each word to a (word, 1) pair for counting
      val kvDS: Dataset[(String, Int)] = wordsDS.map(word => (word, 1))

      // count via the tuple's positional column names (_1, _2)
      val countDF: DataFrame = kvDS
        .groupBy($"_1" as "word")
        .agg(sum($"_2") as "num")

      countDF.show()

      // equivalent count after renaming the columns via toDF
      kvDS
        .toDF("word", "num")
        .groupBy($"word")
        .agg(sum($"num") as "num")
        .show()
    } finally {
      // release the local Spark context even if the job fails
      spark.stop()
    }
  }

}
