package com.scala.learn.sparksql2

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
  * @Copyright: Shanghai Definesys Company.All rights reserved.
  * @Description:
  * @author: chuhaitao
  * @since: 2019/3/9 21:23
  * @history:
  *          1.2019/3/9 created by chuhaitao
  */
object SqlWorldCount2 {

  /**
    * Word-count implemented with Spark SQL: reads a plain-text file, splits
    * each line into words, counts occurrences via a SQL aggregation, prints
    * the result, and persists it as Parquet.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("wc")
      .master("local[*]")
      .getOrCreate()

    try {
      // "text" format yields one Row per input line, in a single column named "value".
      val lines: DataFrame = spark.read.format("text").load("D:\\tmp\\world.txt")

      import spark.implicits._
      // Pull each line out of its Row by the column name "value", then split
      // on spaces to produce a Dataset of individual words.
      val worlds: Dataset[String] = lines.flatMap(_.getAs[String]("value").split(" "))

      /*  Equivalent DataFrame-API formulation, kept for reference:
          import org.apache.spark.sql.functions._
          worlds.groupBy($"value").agg(count("*")).sort($"count(1)" desc)
      */

      // Convert back to a DataFrame (still a single column named "value").
      val wcdf: DataFrame = worlds.toDF()

      // Register a temporary view so the aggregation can be expressed in SQL.
      wcdf.createTempView("word")
      // NOTE(review): sorts ascending by count; append " desc" to the ORDER BY
      // if most-frequent-first output is wanted.
      val res1: DataFrame = spark.sql("select value as word,count(*) as count from word group by word order by count ")

      res1.show()
      // Persist as columnar (Parquet) output. Fix: use overwrite mode so a
      // rerun does not fail with "path already exists" (the default save mode
      // is ErrorIfExists).
      res1.write.mode("overwrite").parquet("D:\\tmp\\parquet")
      /* res1.foreachPartition(part => {
        // write out to a database / redis here
      })*/
    } finally {
      // Fix: the original never stopped the session, leaking the local Spark
      // context (and its UI/threads) until JVM exit.
      spark.stop()
    }
  }

}
