package core_sql.day06_sql

import util.FileUtil
import org.apache.spark.sql.{Dataset, Row, SparkSession}

/**
  * Word count implemented with the Dataset API (Spark 2.0+).
  *
  * In Spark 2.0, the unified entry point for Dataset, DataFrame and SQL
  * operations is [[org.apache.spark.sql.SparkSession]].
  */
object DatasetWordcount {
  def main(args: Array[String]): Unit = {
    val session: SparkSession = SparkSession.builder()
      .appName("DatasetWordcount")
      .master("local[*]")
      .getOrCreate()

    try {
      // Specify where to read the data from.
      val lines: Dataset[String] = session.read.textFile(FileUtil.WORDCOUNT_LOG)

      // Import implicit conversions (enables the $"..." column syntax and
      // the encoders needed by flatMap over a Dataset[String]).
      import session.implicits._
      val words: Dataset[String] = lines.flatMap(_.split(" "))

      // Bring SQL aggregate functions (count, ...) into scope.
      import org.apache.spark.sql.functions._
      val r: Dataset[Row] = words
        .groupBy($"value" as "word")
        .agg(count("*") as "counts")
        .sort($"counts".desc) // method call instead of postfix `desc` (avoids postfixOps warning)

      // Note: the result of groupBy is a RelationalGroupedDataset, not a Dataset.
      // In the variant below, the first count aggregates within each group while
      // the second count would return how many rows the DataFrame has:
      //   val r = words.groupBy($"value" as "word").count().count()
      //
      //   val r = words.groupBy($"value" as "word").count()
      //     .withColumnRenamed("count", "counts").sort($"counts".desc)

      r.show()
    } finally {
      // Always release the SparkSession, even if reading or aggregating fails.
      session.stop()
    }
  }
}
