import cn.doitedu.commons.util.SparkUtil
import org.apache.spark.Partitioner
import org.apache.spark.sql.SparkSession

object ReadParquet {

  /**
   * Word-count demo: reads a text file, computes per-word frequencies with
   * Spark SQL, extracts the top-100 most frequent words, and counts how many
   * distinct words fall into each frequency range.
   *
   * NOTE(review): despite the object name, this reads a plain text file at
   * "/path", not a parquet source — confirm the intended input format.
   */
  def main(args: Array[String]): Unit = {
    // Master / deploy-mode are expected to be supplied externally (e.g. spark-submit).
    val spark = SparkSession.builder().appName("demo").getOrCreate()
    import spark.implicits._
    // Hoisted above use sites; needed for desc() below.
    import org.apache.spark.sql.functions._

    // One row per whitespace-separated token.
    val docs = spark.read.textFile("/path")
    val words = docs.flatMap(_.split(" ")).toDF("word")

    words.createTempView("words")

    // Word frequency table.
    // FIX: group by the column `word`, not the table name `words`,
    // which fails analysis (unresolved grouping expression).
    val wordcount = spark.sql(
      """
        |select
        |  word,
        |  count(1) as cnt
        |from words
        |group by word
        |""".stripMargin)

    // Top-100 words by occurrence count.
    // FIX: limit(100) alone returns an arbitrary 100 rows; order by cnt
    // descending first so this is actually the top 100.
    val top100 = wordcount.orderBy(desc("cnt")).limit(100)

    wordcount.createTempView("wordcount")

    // Number of distinct words in each frequency bucket.
    // Subquery aliased as `t` for Hive-dialect compatibility.
    val rangeWords = spark.sql(
      """
        |select
        |  rg,
        |  count(1) as words
        |from (
        |  select
        |    word,
        |    case
        |      when cnt between 1 and 10  then 'range1'
        |      when cnt between 11 and 20 then 'range2'
        |      when cnt between 21 and 50 then 'range3'
        |      else 'range4'
        |    end as rg
        |  from wordcount
        |) t
        |group by rg
        |""".stripMargin)

    // FIX: trigger actions — previously no action was ever called, so Spark's
    // lazy evaluation meant the job computed nothing before the session closed.
    top100.show(100, truncate = false)
    rangeWords.show()

    spark.close()
  }
}
