package com.hzh.SparkSQL

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Spark SQL word-count demo: reads a text file into a DataFrame,
 * registers it as a temp view, and counts words with plain SQL.
 */
object Demo1WordCount {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("sql")
      .getOrCreate()

    /**
     * 1. Read the data and build a DataFrame — a DF is analogous to a table.
     */
    val linesDF: DataFrame = spark
      .read
      .format("csv")          // file format to read
      .schema("line STRING")  // column name and type
      .option("sep", "\t")    // field separator (csv defaults to comma)
      .load("data/words.txt")

    linesDF.printSchema() // print the schema
    linesDF.show()        // print the data

    /**
     * 2. Register the DataFrame as a view so SQL can be written against it.
     */
    linesDF.createOrReplaceTempView("lines")

    /**
     * 3. Count words with Spark SQL.
     * Spark SQL syntax is fully compatible with Hive SQL.
     */
    val result: DataFrame = spark.sql(
      """
        |select word, count(1) as c from
        |(select explode(split(line,',')) as word
        |from
        |lines) as a
        |group by word
        |""".stripMargin)

    /**
     * 4. Save the result locally.
     */
    result
      .write
      .format("csv")               // output file type
      .option("sep", ",")          // field separator
      .mode(SaveMode.Overwrite)    // overwrite existing output
      .save("data/wc1")

    // Same aggregation written a second time. The original SQL was invalid:
    // it was missing the comma in the select list, the FROM keyword before
    // the subquery, and the split() call that explode() requires (explode
    // takes an array, not a string).
    val result2: DataFrame = spark.sql(
      """
        |select words, count(1) as counts from
        |(select explode(split(line,',')) as words from
        | lines) as a
        |group by words
        |""".stripMargin
    )
    // Trigger an action; without one the query above would be dead code.
    result2.show()

    // Release the session's resources.
    spark.stop()
  }

}
