package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo05Submit {
  /**
   * Demonstrates the usual steps for submitting a Spark SQL job to a cluster:
   * 1. Comment out / omit the hard-coded master so it is chosen at submit time
   * 2. Point the input/output paths at HDFS directories
   * 3. Submit with spark-submit:
   *  hdfs dfs -mkdir -p /data/spark/wc/input
   *  hdfs dfs -put words.txt /data/spark/wc/input
   *  spark-submit --class com.shujia.sql.Demo05Submit --master yarn --deploy-mode client --executor-memory 512M --total-executor-cores 1  Spark-1.0.jar
   *
   * NOTE(review): `--total-executor-cores` applies to standalone/Mesos masters and is
   * ignored under `--master yarn`; for YARN use `--num-executors`/`--executor-cores` — confirm.
   */
  def main(args: Array[String]): Unit = {
    // Build the SparkSession. The master is intentionally not set here so that
    // spark-submit (`--master yarn`) supplies it.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo05Submit")
      .config("spark.sql.shuffle.partitions", 2) // default is 200; a small value suits this tiny demo dataset
      .getOrCreate()

    try {
      // Goal: count the number of occurrences of each word.

      // Read the raw text into a single-column DataFrame. The "|" separator is
      // chosen because it does not occur in the data, so each whole line lands
      // in the `line` column unsplit.
      val lineDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", "|")
        .schema("line String")
        .load("/data/spark/wc/input")

      //    lineDF.show()

      // Register the DataFrame as a temp view so it can be queried with Spark SQL.
      lineDF.createOrReplaceTempView("word_count")

      // SQL approach: split each line on ",", explode into one word per row,
      // then group by word and count.
      spark.sql(
        """
          |SELECT  t1.word
          |        ,count(*) as cnt
          |from (
          |    select  explode(split(line,",")) as word
          |    from word_count
          |) t1 group by t1.word
          |""".stripMargin).show()
    } finally {
      // Always release the session (and its underlying SparkContext), even if
      // the job fails — the original version leaked it on every run.
      spark.stop()
    }
  }

}
